Correctly handles multiple messages.
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
2025-10-06 16:12:38 -07:00
parent 56b20f6e69
commit 6ce7117cc4
3 changed files with 53 additions and 21 deletions

View File

@@ -1,12 +1,44 @@
import logging
import time

from openai import OpenAI

from . import config, prompt

# Module-level OpenAI client, configured once from project settings.
_client = OpenAI(api_key=config.OPENAI_API_KEY)
async def _build_messages_from_telegram(client, entity, system_prompt: str, short: bool):
    """Build an OpenAI chat-format message list from recent Telegram history.

    Pulls up to MAX_FETCH recent messages for *entity* via the Telethon
    *client*, orders them oldest -> newest, prefixes the system prompt, and
    appends a final "nudge" user message steering the reply style.

    Args:
        client: Telethon client used to fetch messages.
        entity: Chat/user entity whose history is read.
        system_prompt: Content of the leading system message.
        short: When True use the tighter EARLY_NUDGE; otherwise USER_NUDGE.

    Returns:
        list[dict]: Messages ready for the OpenAI chat completions API.
    """
    MAX_FETCH = max(20, getattr(config, "MAX_MESSAGES_HISTORY", 40))
    msgs = await client.get_messages(entity, limit=MAX_FETCH)
    # Telethon returns newest first; flip to oldest -> newest for the chat log.
    msgs = list(reversed(msgs))
    chat_msgs = [{"role": "system", "content": system_prompt}]
    # Rough token estimate: ~4 characters per token.
    total_tokens_est = max(1, len(system_prompt) // 4)

    def can_add(text: str) -> bool:
        # Reject once the estimated budget is exceeded, but only after a
        # minimum number of messages is present, so one long text can't
        # starve the context entirely.
        nonlocal total_tokens_est
        add = max(1, len(text or "") // 4)
        limit = getattr(config, "MAX_TOKENS_HISTORY", 2000)
        if total_tokens_est + add > limit and len(chat_msgs) > 10:
            return False
        total_tokens_est += add
        return True

    for m in msgs:
        text = (getattr(m, "message", None) or "").strip()
        if not text:
            continue
        # Outgoing messages are ours (assistant); everything else is the user.
        role = "assistant" if getattr(m, "out", False) else "user"
        if can_add(text):
            chat_msgs.append({"role": role, "content": text})
    # Use a tighter nudge for early small-talk.
    nudge = prompt.EARLY_NUDGE if short else prompt.USER_NUDGE
    chat_msgs.append({"role": "user", "content": nudge})
    return chat_msgs
async def generate_reply(client, entity, short: bool = False) -> str:
    """Generate a chat reply from recent Telegram history via OpenAI.

    Args:
        client: Telethon client used to fetch message history.
        entity: Chat/user entity whose history seeds the prompt.
        short: Keep the reply extra short (early small-talk mode).

    Returns:
        The model's reply text, stripped; "" when the API returns no content.
    """
    msgs = await _build_messages_from_telegram(client, entity, prompt.SYSTEM_PROMPT, short)
    resp = _client.chat.completions.create(
        model=config.OPENAI_MODEL,
        messages=msgs,
        presence_penalty=0.3,
        frequency_penalty=0.2,
    )
    # message.content may be None (e.g. refusal / empty payload) — never
    # call .strip() on it unguarded.
    content = resp.choices[0].message.content
    return content.strip() if content else ""

View File

@@ -1,7 +1,7 @@
import asyncio
import json

from telethon import TelegramClient, events
from telethon.tl.types import User

from . import config, ai, tg_helpers
def _load_last_ids(path):
if path.exists():
@@ -31,18 +31,21 @@ async def run():
pending = {}
last_ids = _load_last_ids(config.LAST_IDS_FILE)
# NOTE(review): nested inside run() and closes over `client`; the diff
# rendering has stripped the original indentation.
async def _estimate_short_mode(chat):
# Heuristic "early conversation" check: short-reply mode applies when only
# 1-2 of the last 10 messages are incoming. `m.out` marks messages we sent,
# so everything else is counted as a user message.
msgs = await client.get_messages(chat, limit=10)
user_count = sum(1 for m in msgs if not getattr(m, "out", False))
return user_count in (1, 2)
async def debounced_reply(chat_id: int):
    """Wait out the debounce window, then generate and send one reply.

    Nested inside run(); closes over `client`. On OpenAI failure a canned
    fallback reply is sent instead of crashing the handler.
    """
    await asyncio.sleep(config.DEBOUNCE_SEC)
    # Default before the try so the regen lambda below can never hit an
    # unbound name if _estimate_short_mode itself raises.
    short_mode = False
    try:
        # If it's the 1st or 2nd user message in the conversation, keep the
        # reply extra short.
        short_mode = await _estimate_short_mode(chat_id)
        reply = await ai.generate_reply(client, chat_id, short=short_mode)
    except Exception as e:
        print(f"OpenAI error: {e}")
        reply = "hey—sorry, got a bit mixed up. how are you doing?"
    await tg_helpers.send_with_catchup(client, chat_id, reply, lambda: ai.generate_reply(client, chat_id, short=short_mode))
@client.on(events.NewMessage(incoming=True))
async def on_msg(event):
@@ -62,9 +65,6 @@ async def run():
last_seen = last_ids.get(event.chat_id, 0)
if event.message.id <= last_seen:
return
text = (event.message.message or "").strip()
if text:
history.append("user", text)
last_ids[event.chat_id] = event.message.id
_save_last_ids(config.LAST_IDS_FILE, last_ids)
@@ -77,8 +77,7 @@ async def run():
# Optional opener on a fresh start (only if target already known and enabled)
if config.AUTO_OPENER_ENABLED and config.OPENER_TEXT and target_entity and not config.HISTORY_FILE.exists():
history.append("assistant", config.OPENER_TEXT)
await tg_helpers.send_with_catchup(client, target_entity, config.OPENER_TEXT, ai.generate_reply)
await tg_helpers.send_with_catchup(client, target_entity, config.OPENER_TEXT, lambda: ai.generate_reply(client, target_entity))
elif not config.AUTO_OPENER_ENABLED and not config.HISTORY_FILE.exists():
print("Opener disabled. Waiting for incoming message.")

View File

@@ -76,16 +76,16 @@ async def simulate_typing(client, entity, seconds: float):
# Before sending a drafted reply, check whether the peer sent anything new
# while we were generating; if so, record it and regenerate the draft.
# Bounded to MAX_ROUNDS passes so a chatty peer can't loop us forever.
# NOTE(review): still calls the `history` module here while other files in
# this commit drop it — confirm which side of the diff these lines are on.
async def catchup_and_regen(client, entity, draft_text: str, regen_fn):
MAX_ROUNDS = 3
last_seen_id = None
for _ in range(MAX_ROUNDS):
msgs = await client.get_messages(entity, limit=1)
if not msgs: break
last = msgs[0]
# No new message since the previous round: draft is up to date.
if last_seen_id is not None and last.id == last_seen_id:
break
last_seen_id = last.id
latest_text = (last.message or "").strip()
# Stop if the newest message is ours or empty (nothing to catch up on).
if last.out or not latest_text: break
# Skip if this exact user message was already recorded.
last_rec = history.last_record()
if last_rec and last_rec.get("role") == "user" and (last_rec.get("content") or "").strip() == latest_text:
break
history.append("user", latest_text)
draft_text = await regen_fn()
return draft_text