#!/usr/bin/env python3
"""
Discord bot for #remora channel - analyzes links in real-time with Haiku
Fetches content, sends to gateway for AI analysis, adds to Tududi inbox
"""
import discord
import os
import json
import re
import requests
from datetime import datetime
from pathlib import Path
from dotenv import load_dotenv
import logging
from urllib.parse import urlparse

# Load .env file (TUDUDI_* / OPENCLAW_* settings)
load_dotenv()

# Setup logging: everything goes both to bot.log (next to this file) and stderr.
log_file = Path(__file__).parent / "bot.log"
logging.basicConfig(
    level=logging.DEBUG,
    format='[%(asctime)s] [%(levelname)-8s] %(message)s',
    handlers=[
        logging.FileHandler(log_file),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Config
CHANNEL_ID = 1467557082583535729  # the #remora channel this bot watches
TRACKER_FILE = Path(__file__).parent / "tracker.json"
TUDUDI_API_URL = os.getenv("TUDUDI_API_URL", "https://todo.dilain.com/api/v1")
TUDUDI_API_KEY = os.getenv("TUDUDI_API_KEY")
GATEWAY_URL = os.getenv("OPENCLAW_GATEWAY", "http://127.0.0.1:18789")
GATEWAY_TOKEN = os.getenv("OPENCLAW_GATEWAY_TOKEN", "")

logger.info("=" * 60)
logger.info("Bot startup")
logger.info(" Channel ID: %s", CHANNEL_ID)
logger.info(" Tududi API: %s", TUDUDI_API_URL)
logger.info(" Gateway: %s", GATEWAY_URL)
logger.info("=" * 60)


# Load or init tracker
def load_tracker():
    """Return the persisted tracker dict, or a fresh structure if none exists.

    The tracker records which message IDs have been processed and which
    links have been seen, keyed to CHANNEL_ID.
    """
    if TRACKER_FILE.exists():
        with open(TRACKER_FILE) as f:
            return json.load(f)
    return {
        "channel_id": CHANNEL_ID,
        "processed_message_ids": [],
        "links": []
    }


def save_tracker(data):
    """Persist tracker state to TRACKER_FILE as pretty-printed JSON."""
    with open(TRACKER_FILE, "w") as f:
        json.dump(data, f, indent=2)


# Detect links in text
def extract_urls(text):
    """Return every http(s) URL found in *text* (list of strings)."""
    url_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+'
    return re.findall(url_pattern, text)


# Ordered (candidate domains, label) table for detect_link_type.
_LINK_TYPES = (
    (("github.com",), "GitHub"),
    (("reddit.com",), "Reddit"),
    (("youtube.com", "youtu.be"), "YouTube"),
    (("tiktok.com",), "TikTok"),
    (("twitter.com", "x.com"), "Twitter/X"),
    (("medium.com",), "Medium"),
    (("dev.to",), "Dev.to"),
    (("arxiv.org",), "arXiv"),
)


def _domain_matches(domain, candidate):
    """True if *domain* is *candidate* or a subdomain of it.

    Exact/suffix matching instead of substring containment: the original
    used `"x.com" in domain`, which misclassified e.g. netflix.com
    (contains "x.com") as Twitter/X.
    """
    return domain == candidate or domain.endswith("." + candidate)


# Detect link type
def detect_link_type(url):
    """Classify *url* by its host; unknown hosts fall back to "Article"."""
    domain = urlparse(url).netloc.lower()
    # Strip an explicit port so "github.com:443" still matches.
    domain = domain.rsplit(":", 1)[0] if ":" in domain else domain
    for candidates, label in _LINK_TYPES:
        if any(_domain_matches(domain, c) for c in candidates):
            return label
    return "Article"


# Fetch URL content
def fetch_url_content(url):
    """Fetch URL and return content"""
    logger.debug(" šŸ“„ Fetching: %s", url)
    try:
        response = requests.get(
            url,
            timeout=8,
            headers={
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36',
                'Accept': 'text/html,application/xhtml+xml'
            },
            allow_redirects=True
        )
        response.raise_for_status()
        content = response.text[:5000]  # First 5k chars
        # Try to find title
        title = None
        # NOTE(review): the <title> tag text in this pattern was eaten by
        # tag-stripping in the mangled source; reconstructed from the
        # surviving residue r']*>\s*([^<]+?)\s*'.
        title_match = re.search(r'<title[^>]*>\s*([^<]+?)\s*</title>',
                                content, re.IGNORECASE)
        if title_match:
            title = title_match.group(1).strip()
        if not title:
            # NOTE(review): the source chunk is truncated mid-statement here.
            # The og:title fallback and the return shape below are a
            # best-effort reconstruction — confirm against the original
            # file and this function's callers.
            og_match = re.search(
                r'<meta[^>]+property=["\']og:title["\'][^>]*content=["\']([^"\']+)["\']',
                content, re.IGNORECASE)
            if og_match:
                title = og_match.group(1).strip()
        return {"title": title, "content": content}
    except requests.RequestException as e:
        # Best-effort fetch: log and signal failure rather than crashing
        # the bot's message loop.
        logger.debug(" āš ļø Fetch failed: %s", e)
        return None