#!/usr/bin/env python3
"""
Discord bot for #remora channel - analyzes links in real-time with web_fetch + AI
Posts summaries, adds to Tududi inbox, maintains JSON history + logs
"""
|
|
|
|
|
|
|
|
|
|
import discord
|
|
|
|
|
import os
|
|
|
|
|
import json
|
|
|
|
|
import re
|
|
|
|
|
import requests
|
|
|
|
|
from datetime import datetime
|
|
|
|
|
from pathlib import Path
|
|
|
|
|
from dotenv import load_dotenv
|
2026-02-09 18:46:45 +01:00
|
|
|
import logging
|
|
|
|
|
from urllib.parse import urlparse
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
# Load .env file
|
|
|
|
|
load_dotenv()
|
|
|
|
|
|
2026-02-09 18:46:45 +01:00
|
|
|
# Setup logging
|
|
|
|
|
log_file = Path(__file__).parent / "bot.log"
|
|
|
|
|
logging.basicConfig(
|
|
|
|
|
level=logging.DEBUG,
|
|
|
|
|
format='[%(asctime)s] [%(levelname)-8s] %(message)s',
|
|
|
|
|
handlers=[
|
|
|
|
|
logging.FileHandler(log_file),
|
|
|
|
|
logging.StreamHandler()
|
|
|
|
|
]
|
|
|
|
|
)
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
2026-02-09 18:07:14 +01:00
|
|
|
# Config
|
|
|
|
|
CHANNEL_ID = 1467557082583535729
|
|
|
|
|
TRACKER_FILE = Path(__file__).parent / "tracker.json"
|
|
|
|
|
TUDUDI_API_URL = os.getenv("TUDUDI_API_URL", "https://todo.dilain.com/api/v1")
|
|
|
|
|
TUDUDI_API_KEY = os.getenv("TUDUDI_API_KEY")
|
|
|
|
|
GATEWAY_URL = os.getenv("OPENCLAW_GATEWAY", "http://127.0.0.1:18789")
|
2026-02-09 18:46:45 +01:00
|
|
|
GATEWAY_TOKEN = os.getenv("OPENCLAW_GATEWAY_TOKEN", "")
|
|
|
|
|
|
|
|
|
|
logger.info("=" * 60)
|
|
|
|
|
logger.info("Bot startup")
|
|
|
|
|
logger.info(f" Channel ID: {CHANNEL_ID}")
|
|
|
|
|
logger.info(f" Tududi API: {TUDUDI_API_URL}")
|
|
|
|
|
logger.info(f" Gateway: {GATEWAY_URL}")
|
|
|
|
|
logger.info("=" * 60)
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
# Load or init tracker
|
|
|
|
|
def load_tracker():
|
|
|
|
|
if TRACKER_FILE.exists():
|
|
|
|
|
with open(TRACKER_FILE) as f:
|
|
|
|
|
return json.load(f)
|
|
|
|
|
return {
|
|
|
|
|
"channel_id": CHANNEL_ID,
|
|
|
|
|
"processed_message_ids": [],
|
|
|
|
|
"links": []
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
def save_tracker(data):
|
|
|
|
|
with open(TRACKER_FILE, "w") as f:
|
|
|
|
|
json.dump(data, f, indent=2)
|
|
|
|
|
|
|
|
|
|
# Detect links in text
|
|
|
|
|
def extract_urls(text):
|
|
|
|
|
url_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+'
|
|
|
|
|
return re.findall(url_pattern, text)
|
|
|
|
|
|
2026-02-09 18:46:45 +01:00
|
|
|
# Detect link type
|
|
|
|
|
def detect_link_type(url):
|
|
|
|
|
domain = urlparse(url).netloc.lower()
|
|
|
|
|
|
|
|
|
|
if "github.com" in domain:
|
|
|
|
|
return "GitHub"
|
|
|
|
|
elif "reddit.com" in domain:
|
|
|
|
|
return "Reddit"
|
|
|
|
|
elif "youtube.com" in domain or "youtu.be" in domain:
|
|
|
|
|
return "YouTube"
|
|
|
|
|
elif "tiktok.com" in domain:
|
|
|
|
|
return "TikTok"
|
|
|
|
|
elif "twitter.com" in domain or "x.com" in domain:
|
|
|
|
|
return "Twitter/X"
|
|
|
|
|
elif "medium.com" in domain:
|
|
|
|
|
return "Medium"
|
|
|
|
|
elif "dev.to" in domain:
|
|
|
|
|
return "Dev.to"
|
|
|
|
|
elif "arxiv.org" in domain:
|
|
|
|
|
return "arXiv"
|
|
|
|
|
else:
|
|
|
|
|
return "Article"
|
|
|
|
|
|
|
|
|
|
# Fetch URL content using requests
|
|
|
|
|
def fetch_url_content(url):
|
|
|
|
|
"""Fetch URL and return title + excerpt"""
|
|
|
|
|
logger.debug(f" 📥 Fetching: {url}")
|
|
|
|
|
|
2026-02-09 18:07:14 +01:00
|
|
|
try:
|
2026-02-09 18:46:45 +01:00
|
|
|
response = requests.get(
|
|
|
|
|
url,
|
2026-02-09 18:50:14 +01:00
|
|
|
timeout=8,
|
|
|
|
|
headers={
|
|
|
|
|
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36',
|
|
|
|
|
'Accept': 'text/html,application/xhtml+xml'
|
|
|
|
|
},
|
2026-02-09 18:46:45 +01:00
|
|
|
allow_redirects=True
|
|
|
|
|
)
|
|
|
|
|
response.raise_for_status()
|
2026-02-09 18:50:14 +01:00
|
|
|
content = response.text[:4000] # First 4k chars
|
|
|
|
|
|
|
|
|
|
# Try multiple patterns for title
|
|
|
|
|
title = None
|
|
|
|
|
|
|
|
|
|
# Pattern 1: <title> tag
|
|
|
|
|
title_match = re.search(r'<title[^>]*>\s*([^<]+?)\s*</title>', content, re.IGNORECASE)
|
|
|
|
|
if title_match:
|
|
|
|
|
title = title_match.group(1).strip()
|
2026-02-09 18:07:14 +01:00
|
|
|
|
2026-02-09 18:50:14 +01:00
|
|
|
# Pattern 2: og:title meta tag (for GitHub, etc.)
|
|
|
|
|
if not title:
|
|
|
|
|
og_match = re.search(r'<meta\s+property="og:title"\s+content="([^"]+)"', content, re.IGNORECASE)
|
|
|
|
|
if og_match:
|
|
|
|
|
title = og_match.group(1).strip()
|
|
|
|
|
|
|
|
|
|
# Pattern 3: h1 tag (for GitHub README)
|
|
|
|
|
if not title:
|
|
|
|
|
h1_match = re.search(r'<h1[^>]*>([^<]+)</h1>', content, re.IGNORECASE)
|
|
|
|
|
if h1_match:
|
|
|
|
|
title = h1_match.group(1).strip()
|
|
|
|
|
|
|
|
|
|
# Fallback
|
|
|
|
|
if not title:
|
|
|
|
|
title = url.split('/')[-1] or "Untitled"
|
2026-02-09 18:07:14 +01:00
|
|
|
|
2026-02-09 18:46:45 +01:00
|
|
|
# Extract meta description
|
|
|
|
|
desc_match = re.search(r'<meta\s+name="description"\s+content="([^"]+)"', content, re.IGNORECASE)
|
|
|
|
|
description = desc_match.group(1) if desc_match else ""
|
2026-02-09 18:07:14 +01:00
|
|
|
|
2026-02-09 18:50:14 +01:00
|
|
|
# Extract og:description
|
|
|
|
|
if not description:
|
|
|
|
|
og_desc = re.search(r'<meta\s+property="og:description"\s+content="([^"]+)"', content, re.IGNORECASE)
|
|
|
|
|
if og_desc:
|
|
|
|
|
description = og_desc.group(1)
|
|
|
|
|
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.debug(f" ✓ Fetched: {title}")
|
2026-02-09 18:07:14 +01:00
|
|
|
return {
|
|
|
|
|
"title": title,
|
2026-02-09 18:46:45 +01:00
|
|
|
"description": description,
|
|
|
|
|
"content": content,
|
2026-02-09 18:07:14 +01:00
|
|
|
"status": "ok"
|
|
|
|
|
}
|
2026-02-09 18:46:45 +01:00
|
|
|
except requests.Timeout:
|
|
|
|
|
logger.warning(f" ⏱️ Timeout: {url}")
|
|
|
|
|
return {"title": "Request timeout", "status": "timeout", "content": ""}
|
|
|
|
|
except requests.HTTPError as e:
|
|
|
|
|
logger.warning(f" ❌ HTTP {e.response.status_code}: {url}")
|
|
|
|
|
return {"title": f"HTTP {e.response.status_code}", "status": "http_error", "content": ""}
|
2026-02-09 18:07:14 +01:00
|
|
|
except Exception as e:
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.error(f" ❌ Error: {e}")
|
|
|
|
|
return {"title": "Fetch failed", "status": "error", "error": str(e), "content": ""}
|
|
|
|
|
|
2026-02-09 19:05:31 +01:00
|
|
|
# Analyze content (local heuristic-based)
|
2026-02-09 18:56:26 +01:00
|
|
|
def analyze_content(url, title, content, link_type):
|
2026-02-09 19:05:31 +01:00
|
|
|
"""Analyze content and suggest summary + tag locally"""
|
2026-02-09 18:56:26 +01:00
|
|
|
logger.debug(f" 🤖 Analyzing content: {url}")
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" Content length: {len(content)} chars")
|
|
|
|
|
logger.debug(f" Link type: {link_type}")
|
2026-02-09 18:46:45 +01:00
|
|
|
|
|
|
|
|
try:
|
2026-02-09 19:05:31 +01:00
|
|
|
# Extract useful info from HTML content
|
|
|
|
|
description = ""
|
|
|
|
|
|
|
|
|
|
# Looking for meta description
|
|
|
|
|
desc_match = re.search(r'<meta\s+name="description"\s+content="([^"]+)"', content, re.IGNORECASE)
|
|
|
|
|
if desc_match:
|
|
|
|
|
description = desc_match.group(1).strip()
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" Found meta description: {description[:80]}")
|
2026-02-09 19:05:31 +01:00
|
|
|
|
|
|
|
|
# Looking for og:description
|
|
|
|
|
if not description:
|
|
|
|
|
og_desc = re.search(r'<meta\s+property="og:description"\s+content="([^"]+)"', content, re.IGNORECASE)
|
|
|
|
|
if og_desc:
|
|
|
|
|
description = og_desc.group(1).strip()
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" Found og:description: {description[:80]}")
|
2026-02-09 19:05:31 +01:00
|
|
|
|
|
|
|
|
# Looking for first paragraph after title
|
|
|
|
|
if not description:
|
|
|
|
|
p_match = re.search(r'<p[^>]*>([^<]+)</p>', content, re.IGNORECASE)
|
|
|
|
|
if p_match:
|
|
|
|
|
description = p_match.group(1).strip()[:200]
|
|
|
|
|
|
|
|
|
|
# Determine tag based on content + URL + type
|
|
|
|
|
tag = "interesting"
|
|
|
|
|
summary = ""
|
|
|
|
|
|
|
|
|
|
if link_type == "GitHub":
|
|
|
|
|
tag = "project"
|
|
|
|
|
summary = f"GitHub repository: {title}"
|
|
|
|
|
# Try to extract more info from README
|
|
|
|
|
readme_match = re.search(r'README[^<]*</h[1-3]>[^<]*<p[^>]*>([^<]+)', content, re.IGNORECASE)
|
|
|
|
|
if readme_match:
|
|
|
|
|
summary += f". {readme_match.group(1)[:100]}"
|
|
|
|
|
|
|
|
|
|
elif link_type == "YouTube":
|
|
|
|
|
tag = "video"
|
|
|
|
|
summary = f"Video: {title}"
|
|
|
|
|
if description:
|
|
|
|
|
summary += f". {description[:80]}"
|
|
|
|
|
|
|
|
|
|
elif link_type == "Reddit":
|
|
|
|
|
tag = "discussion"
|
|
|
|
|
summary = f"Reddit discussion: {title}"
|
|
|
|
|
|
|
|
|
|
elif link_type == "Medium" or link_type == "Dev.to":
|
|
|
|
|
tag = "article"
|
|
|
|
|
summary = f"Article: {title}"
|
|
|
|
|
if description:
|
|
|
|
|
summary += f". {description[:80]}"
|
|
|
|
|
|
|
|
|
|
elif link_type == "arXiv":
|
|
|
|
|
tag = "learning"
|
|
|
|
|
summary = f"Research paper: {title}"
|
2026-02-09 18:46:45 +01:00
|
|
|
|
|
|
|
|
else:
|
2026-02-09 19:05:31 +01:00
|
|
|
# Generic web article
|
|
|
|
|
tag = "to-read"
|
|
|
|
|
summary = title
|
|
|
|
|
if description:
|
|
|
|
|
summary += f". {description[:100]}"
|
|
|
|
|
|
|
|
|
|
# Truncate summary to reasonable length
|
|
|
|
|
summary = summary[:200]
|
|
|
|
|
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.info(f" ✓ Analysis complete - Tag: {tag}, Summary: {summary[:60]}")
|
2026-02-09 19:05:31 +01:00
|
|
|
|
2026-02-09 19:11:34 +01:00
|
|
|
result = {
|
2026-02-09 19:05:31 +01:00
|
|
|
"summary": summary,
|
|
|
|
|
"tag": tag,
|
|
|
|
|
"relevance": "relevant"
|
|
|
|
|
}
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" Returning: {result}")
|
|
|
|
|
return result
|
2026-02-09 18:56:26 +01:00
|
|
|
|
2026-02-09 18:46:45 +01:00
|
|
|
except Exception as e:
|
2026-02-09 19:05:31 +01:00
|
|
|
logger.error(f" Analysis error: {e}")
|
2026-02-09 19:11:34 +01:00
|
|
|
import traceback
|
|
|
|
|
logger.error(traceback.format_exc())
|
2026-02-09 19:05:31 +01:00
|
|
|
# Return minimal analysis
|
|
|
|
|
return {
|
|
|
|
|
"summary": title,
|
|
|
|
|
"tag": "interesting",
|
|
|
|
|
"relevance": "relevant"
|
|
|
|
|
}
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
# Send to Tududi inbox
|
2026-02-09 18:56:26 +01:00
|
|
|
def add_to_tududi(title, url, link_type, summary="", tag=""):
|
|
|
|
|
"""Add to Tududi inbox with intelligent summary"""
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.debug(f" 📌 Adding to Tududi: {title}")
|
|
|
|
|
|
2026-02-09 18:07:14 +01:00
|
|
|
try:
|
|
|
|
|
if not TUDUDI_API_KEY:
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.warning(" TUDUDI_API_KEY not set")
|
2026-02-09 18:07:14 +01:00
|
|
|
return False
|
|
|
|
|
|
2026-02-09 18:56:26 +01:00
|
|
|
# Format the inbox content
|
2026-02-09 18:46:45 +01:00
|
|
|
content = f"📌 **{link_type}**: {title}\n🔗 {url}"
|
2026-02-09 18:56:26 +01:00
|
|
|
|
|
|
|
|
if summary:
|
|
|
|
|
content += f"\n\n💡 **Summary**:\n{summary}"
|
|
|
|
|
|
|
|
|
|
if tag:
|
|
|
|
|
content += f"\n\n🏷️ **Tag**: {tag}"
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
response = requests.post(
|
|
|
|
|
f"{TUDUDI_API_URL}/inbox",
|
|
|
|
|
headers={
|
|
|
|
|
"Authorization": f"Bearer {TUDUDI_API_KEY}",
|
|
|
|
|
"Content-Type": "application/json"
|
|
|
|
|
},
|
|
|
|
|
json={"content": content},
|
|
|
|
|
timeout=5
|
|
|
|
|
)
|
|
|
|
|
|
2026-02-09 18:56:26 +01:00
|
|
|
if response.status_code in [200, 201]: # 200 or 201 are both OK
|
|
|
|
|
logger.info(f" ✓ Added to Tududi inbox with tag: {tag}")
|
2026-02-09 18:07:14 +01:00
|
|
|
return True
|
|
|
|
|
else:
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.warning(f" Tududi error: {response.status_code}")
|
2026-02-09 18:07:14 +01:00
|
|
|
return False
|
|
|
|
|
except Exception as e:
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.error(f" Tududi error: {e}")
|
2026-02-09 18:07:14 +01:00
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
# Discord bot
|
|
|
|
|
intents = discord.Intents.default()
|
|
|
|
|
intents.message_content = True
|
|
|
|
|
|
|
|
|
|
class LinkAnalyzerBot(discord.Client):
|
|
|
|
|
async def on_ready(self):
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.info(f"✅ Bot logged in as {self.user}")
|
|
|
|
|
logger.info(f"📍 Watching channel #remora ({CHANNEL_ID})")
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
async def on_message(self, message):
|
|
|
|
|
# Ignore bot's own messages
|
|
|
|
|
if message.author == self.user:
|
|
|
|
|
return
|
|
|
|
|
|
|
|
|
|
# Only process #remora channel
|
|
|
|
|
if message.channel.id != CHANNEL_ID:
|
|
|
|
|
return
|
|
|
|
|
|
|
|
|
|
# Check for URLs
|
|
|
|
|
urls = extract_urls(message.content)
|
|
|
|
|
if not urls:
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.debug(f"No URLs in message from {message.author}")
|
2026-02-09 18:07:14 +01:00
|
|
|
return
|
|
|
|
|
|
|
|
|
|
# Skip if already processed
|
|
|
|
|
tracker = load_tracker()
|
|
|
|
|
if message.id in tracker["processed_message_ids"]:
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.debug(f"Skipping already-processed message {message.id}")
|
2026-02-09 18:07:14 +01:00
|
|
|
return
|
|
|
|
|
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.info(f"🔗 New link(s) from {message.author}: {message.content}")
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
# Process each URL
|
|
|
|
|
for url in urls:
|
2026-02-09 18:46:45 +01:00
|
|
|
try:
|
|
|
|
|
logger.info(f"Processing: {url}")
|
|
|
|
|
link_type = detect_link_type(url)
|
|
|
|
|
|
|
|
|
|
# Fetch content
|
|
|
|
|
fetch_result = fetch_url_content(url)
|
|
|
|
|
title = fetch_result["title"]
|
|
|
|
|
|
2026-02-09 18:56:26 +01:00
|
|
|
# Analyze content if fetch was successful
|
|
|
|
|
analysis_data = None
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" 📊 Fetch status: {fetch_result['status']}")
|
|
|
|
|
|
2026-02-09 18:56:26 +01:00
|
|
|
if fetch_result["status"] == "ok":
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" 🔍 Starting analysis...")
|
2026-02-09 18:56:26 +01:00
|
|
|
analysis_data = analyze_content(url, title, fetch_result.get("content", ""), link_type)
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" Analysis result: {analysis_data}")
|
|
|
|
|
else:
|
|
|
|
|
logger.debug(f" ⚠️ Fetch failed, skipping analysis")
|
2026-02-09 18:56:26 +01:00
|
|
|
|
|
|
|
|
# Prepare summary for Tududi
|
|
|
|
|
summary_text = ""
|
|
|
|
|
tag = "interesting"
|
|
|
|
|
if analysis_data:
|
|
|
|
|
summary_text = analysis_data.get("summary", "")
|
|
|
|
|
tag = analysis_data.get("tag", "interesting")
|
2026-02-09 19:11:34 +01:00
|
|
|
logger.debug(f" ✓ Got summary: {summary_text[:80]}")
|
|
|
|
|
else:
|
|
|
|
|
logger.warning(f" ❌ No analysis data returned")
|
2026-02-09 18:46:45 +01:00
|
|
|
|
2026-02-09 18:56:26 +01:00
|
|
|
# Add to Tududi with summary
|
|
|
|
|
tududi_ok = add_to_tududi(title, url, link_type, summary_text, tag)
|
2026-02-09 18:46:45 +01:00
|
|
|
|
|
|
|
|
# Format response for Discord
|
|
|
|
|
response_text = f"📌 **{link_type}**: {title}"
|
2026-02-09 18:56:26 +01:00
|
|
|
if summary_text:
|
|
|
|
|
response_text += f"\n\n💡 {summary_text}"
|
|
|
|
|
if tag:
|
|
|
|
|
response_text += f"\n\n🏷️ Tag: `{tag}`"
|
2026-02-09 18:46:45 +01:00
|
|
|
|
|
|
|
|
logger.debug(f"Posting response: {response_text}")
|
|
|
|
|
|
|
|
|
|
# Post in channel
|
|
|
|
|
await message.reply(response_text, mention_author=False)
|
|
|
|
|
|
|
|
|
|
# Update tracker
|
|
|
|
|
tracker["links"].append({
|
|
|
|
|
"url": url,
|
|
|
|
|
"title": title,
|
|
|
|
|
"type": link_type,
|
|
|
|
|
"author": str(message.author),
|
|
|
|
|
"message_id": message.id,
|
|
|
|
|
"date": datetime.now().isoformat(),
|
2026-02-09 18:59:09 +01:00
|
|
|
"analysis": analysis_data,
|
2026-02-09 18:46:45 +01:00
|
|
|
"tududi": tududi_ok,
|
|
|
|
|
"fetch_status": fetch_result["status"]
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
logger.info(f"✓ Processed: {url}")
|
|
|
|
|
|
|
|
|
|
except Exception as e:
|
|
|
|
|
logger.error(f"❌ Error processing {url}: {e}")
|
|
|
|
|
await message.reply(f"❌ Error analyzing link: {e}", mention_author=False)
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
# Update processed IDs
|
|
|
|
|
tracker["processed_message_ids"].append(message.id)
|
|
|
|
|
save_tracker(tracker)
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.info(f"Updated tracker, total links: {len(tracker['links'])}")
|
2026-02-09 18:07:14 +01:00
|
|
|
|
|
|
|
|
# Main
|
|
|
|
|
if __name__ == "__main__":
|
|
|
|
|
token = os.getenv("DISCORD_BOT_TOKEN")
|
|
|
|
|
if not token:
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.error("❌ DISCORD_BOT_TOKEN not set!")
|
2026-02-09 18:07:14 +01:00
|
|
|
exit(1)
|
|
|
|
|
|
2026-02-09 18:46:45 +01:00
|
|
|
logger.info("Starting bot...")
|
2026-02-09 18:07:14 +01:00
|
|
|
bot = LinkAnalyzerBot(intents=intents)
|
|
|
|
|
bot.run(token)
|