Handle race conditions before sending messages by adding pre-send catch-up logic.
This commit is contained in:
66
main.py
66
main.py
@@ -342,6 +342,70 @@ async def _simulate_typing(client: TelegramClient, entity, seconds: float):
|
||||
# Typing is best-effort; fall back silently
|
||||
print(f"Typing simulation warning: {e}")
|
||||
|
||||
async def _catchup_new_incoming_and_regenerate(client: TelegramClient, entity, draft_text: str) -> str:
    """
    Right before sending, check whether new incoming messages arrived after the
    draft was prepared. If so, record them in history and regenerate the reply.

    Repeats for a few rounds, since more messages can arrive while we are busy
    regenerating. Returns the (possibly regenerated) draft text.
    """
    rounds_left = 3  # cap the catch-up loop so we eventually send something
    while rounds_left > 0:
        rounds_left -= 1

        # Fetch only the single newest message in this dialog.
        recent = await client.get_messages(entity, limit=1)
        if not recent:
            break
        newest = recent[0]

        # Stop if the newest message is our own outgoing one, or carries no text.
        incoming_text = (newest.message or "").strip()
        if newest.out or not incoming_text:
            break

        # If the tail of history is already this exact user text, the current
        # draft was built with it in mind — nothing to catch up on.
        tail = last_history_record()
        already_recorded = (
            tail is not None
            and tail.get("role") == "user"
            and (tail.get("content") or "").strip() == incoming_text
        )
        if already_recorded:
            break

        # Record the new incoming message, then produce a fresh reply for it.
        append_history("user", incoming_text)
        try:
            draft_text = await generate_reply_via_openai()
        except Exception as e:
            # Best-effort: fall back to a canned conversational nudge.
            print(f"OpenAI error during pre-send catch-up: {e}")
            draft_text = random.choice([
                "Sorry, just saw the new messages—could you say a bit more?",
                "Got it—what part matters most to you there?",
                "Interesting—why do you think that is?",
            ])
        # Loop again: even more messages may have arrived while regenerating.
    return draft_text
|
||||
|
||||
async def send_with_catchup(client: TelegramClient, entity, text: str):
    """
    Send a reply while handling race conditions:
    - Wait a human-like delay.
    - Right before typing, check for any new incoming messages and regenerate if needed.
    - Show typing indicator for a human-ish duration based on final text.
    - Do a final quick check again after typing, then send.

    Raises:
        FloodWaitError: if Telegram keeps flood-limiting us after all retries.
    """
    # Initial pause
    await human_delay()

    # First catch-up before typing
    text = await _catchup_new_incoming_and_regenerate(client, entity, text)

    # Typing simulation based on the (possibly updated) text
    if TYPING_SIM_ENABLED:
        seconds = _estimate_typing_seconds(text)
        await _simulate_typing(client, entity, seconds)

    # Final quick catch-up in case they sent more during typing
    text = await _catchup_new_incoming_and_regenerate(client, entity, text)

    # Bounded retry on flood limits. The previous version retried exactly once,
    # so a second consecutive FloodWaitError escaped unhandled; here we honor
    # the server-requested wait (plus a small buffer) on each failed attempt
    # and re-raise only after the final one.
    MAX_SEND_ATTEMPTS = 3
    for attempt in range(MAX_SEND_ATTEMPTS):
        try:
            await client.send_message(entity, text)
            return
        except FloodWaitError as e:
            if attempt == MAX_SEND_ATTEMPTS - 1:
                raise
            await asyncio.sleep(e.seconds + 3)
|
||||
|
||||
async def safe_send(client: TelegramClient, entity, text: str):
|
||||
# Initial human-like pause before reacting at all
|
||||
await human_delay()
|
||||
@@ -440,7 +504,7 @@ async def main():
|
||||
|
||||
# Persist and send
|
||||
append_history("assistant", reply)
|
||||
await safe_send(client, event.chat_id, reply)
|
||||
await send_with_catchup(client, event.chat_id, reply)
|
||||
|
||||
print("Listening for target messages…")
|
||||
await client.run_until_disconnected()
|
||||
|
||||
Reference in New Issue
Block a user