# Telegram auto-reply bot: watches for incoming messages from a single
# target user and answers them with AI-generated replies, with per-chat
# debouncing and a persisted duplicate-message guard.
import asyncio
import json

from telethon import TelegramClient, events
from telethon.tl.types import User

from . import config, ai, tg_helpers
def _load_last_ids(path):
    """Load the per-chat last-seen message ids from *path*.

    Returns a dict mapping chat id (int) -> id of the last handled message.
    JSON object keys are always strings, so numeric keys are converted back
    to int here; without that, integer-keyed lookups (as done in the message
    handler's duplicate guard) would always miss after a restart and old
    messages would be reprocessed.

    Returns {} when the file is missing, unreadable, corrupt, or does not
    contain a JSON object.
    """
    if path.exists():
        try:
            raw = json.loads(path.read_text(encoding="utf-8"))
        except (OSError, ValueError) as e:  # ValueError covers JSONDecodeError
            print(f"Warning: failed to load last ids: {e}")
            return {}
        if isinstance(raw, dict):
            # Undo the str() that json.dumps applies to int keys.
            return {
                int(k) if isinstance(k, str) and k.lstrip("-").isdigit() else k: v
                for k, v in raw.items()
            }
    return {}
def _save_last_ids(path, data):
    """Best-effort persist of *data* (chat id -> message id) as JSON at *path*.

    Failures are only logged, never raised, so a persistence problem cannot
    take down the message handler that calls this.
    """
    try:
        serialized = json.dumps(data)
        path.write_text(serialized, encoding="utf-8")
    except Exception as e:
        print(f"Warning: failed to save last ids: {e}")
async def run():
    """Entry point: connect to Telegram and auto-reply to one target user.

    Flow: resolve the target entity up front (or defer to dynamic matching
    on the first incoming message), register a NewMessage handler that
    debounces replies per chat, optionally send an opener on a fresh start,
    then block until the client disconnects.
    """
    config.require()  # fail fast if required settings are missing
    client = TelegramClient(config.SESSION_PREFIX, config.API_ID, config.API_HASH)
    await client.start()

    # Try to resolve the configured target now; if that is not yet possible,
    # fall back to matching inside the handler.
    target_entity = await tg_helpers.resolve_target_entity(client)
    if target_entity:
        print(f"Target resolved: id={target_entity.id}, username={getattr(target_entity,'username',None)}")
        tg_helpers.cache_target_id(target_entity)
    else:
        print("Target not resolved yet. Will match dynamically on first incoming message from target.")

    # Startup catch-up: only if needed (simple version relies on handler later)

    # Debounce state
    pending = {}  # chat_id -> in-flight asyncio.Task for the debounced reply
    # chat_id -> id of the last handled message (persisted across restarts).
    # NOTE(review): this was round-tripped through JSON, whose object keys are
    # strings — confirm _load_last_ids normalizes keys back to int, otherwise
    # the integer-keyed duplicate-guard lookup below always returns 0 after a
    # restart.
    last_ids = _load_last_ids(config.LAST_IDS_FILE)

    async def _estimate_short_mode(chat):
        # Short for first 1-2 user messages in the thread
        msgs = await client.get_messages(chat, limit=10)
        # Telethon sets .out on outgoing messages; everything else counts as
        # a message from the other party.
        user_count = sum(1 for m in msgs if not getattr(m, "out", False))
        return user_count in (1, 2)

    async def debounced_reply(chat_id: int):
        # Sleep out the debounce window; if another message arrives meanwhile
        # the handler cancels this task and schedules a fresh one, so only the
        # last message in a burst triggers a reply.
        await asyncio.sleep(config.DEBOUNCE_SEC)
        try:
            short_mode = await _estimate_short_mode(chat_id)
            reply = await ai.generate_reply(client, chat_id, short=short_mode)
        except Exception as e:
            print(f"OpenAI error: {e}")
            reply = "hey—sorry, got a bit mixed up. how are you doing?"
            # NOTE(review): if _estimate_short_mode itself raised, short_mode
            # is never bound, and the retry lambda below would fail with
            # NameError when invoked — worth confirming and fixing upstream.
        await tg_helpers.send_with_catchup(client, chat_id, reply, lambda: ai.generate_reply(client, chat_id, short=short_mode))

    @client.on(events.NewMessage(incoming=True))
    async def on_msg(event):
        nonlocal target_entity, pending, last_ids
        sender = await event.get_sender()
        if not isinstance(sender, User): return  # ignore non-user senders (channels etc.)

        # Auto-target: when no target is configured at all (no id, username,
        # or display name), lock onto the first user who messages us.
        if (not target_entity) and (config.TARGET_USER_ID == 0 and not config.TARGET_USERNAME and not config.TARGET_DISPLAY_NAME):
            target_entity = sender
            tg_helpers.cache_target_id(target_entity)
            print(f"Auto-targeted sender id={sender.id}, username={sender.username}")

        # Everyone other than the target is ignored.
        if not tg_helpers.sender_matches_target(sender, target_entity):
            return

        # duplicate guard
        last_seen = last_ids.get(event.chat_id, 0)
        if event.message.id <= last_seen:
            return

        # Record and persist progress before scheduling the reply.
        last_ids[event.chat_id] = event.message.id
        _save_last_ids(config.LAST_IDS_FILE, last_ids)

        # debounce
        task = pending.get(event.chat_id)
        if task and not task.done():
            task.cancel()  # newer message supersedes the pending reply
        pending[event.chat_id] = asyncio.create_task(debounced_reply(event.chat_id))

    # Optional opener on a fresh start (only if target already known and enabled)
    if config.AUTO_OPENER_ENABLED and config.OPENER_TEXT and target_entity and not config.HISTORY_FILE.exists():
        await tg_helpers.send_with_catchup(client, target_entity, config.OPENER_TEXT, lambda: ai.generate_reply(client, target_entity))
    elif not config.AUTO_OPENER_ENABLED and not config.HISTORY_FILE.exists():
        print("Opener disabled. Waiting for incoming message.")

    print("Listening…")
    await client.run_until_disconnected()