Compare commits

...

2 Commits

Author SHA1 Message Date
Cameron Grant
9b227fccbd Added typing simulation. 2025-10-03 17:43:05 -07:00
Cameron Grant
2fbda0c4cd Removed bad opener. Allow for custom openers. 2025-10-03 17:43:05 -07:00
3 changed files with 84 additions and 11 deletions

View File

@@ -32,5 +32,20 @@ MAX_TOKENS_HISTORY=2200
# Hard cap on number of messages kept in history
MAX_MESSAGES_HISTORY=30
# Optional opener (disabled by default).
# When enabled, the opener is sent only on a fresh start (no history),
# when a target is already resolved, and no catch-up reply was sent.
AUTO_OPENER_ENABLED=false
OPENER_TEXT=
# Typing simulation (show "typing..." before sending)
# Enable/disable typing indicator and tune how long it appears based on message length.
TYPING_SIM_ENABLED=true
# Approximate words per minute used to estimate typing duration (5 chars ≈ 1 word)
TYPING_WPM=22
# Clamp the simulated typing duration to this range (in seconds)
TYPING_MIN_SEC=2.0
TYPING_MAX_SEC=18.0
# Optional: ensure unbuffered logs in some environments
PYTHONUNBUFFERED=1

7
README
View File

@@ -24,10 +24,17 @@ Telethon + OpenAI bot that engages unsolicited DMs with safe, time-wasting small
| HISTORY_FILE | no | chat_history.jsonl | Path to local JSONL file for conversation history |
| MAX_TOKENS_HISTORY | no | 2200 | Rough token budget for messages passed to the model |
| MAX_MESSAGES_HISTORY | no | 30 | Hard cap on number of messages kept in rolling history |
| AUTO_OPENER_ENABLED | no | false | If true, send an initial opener only on fresh start when a target is resolved and no catch-up reply was needed |
| OPENER_TEXT | no | — | The opener text to send when AUTO_OPENER_ENABLED=true |
| TYPING_SIM_ENABLED | no | true | Show a "typing…" indicator before sending the message |
| TYPING_WPM | no | 22 | Approximate words per minute to estimate typing duration |
| TYPING_MIN_SEC | no | 2.0 | Minimum typing duration (seconds) |
| TYPING_MAX_SEC | no | 18.0 | Maximum typing duration (seconds) |
| PYTHONUNBUFFERED | no | 1 | If set, forces unbuffered output in some environments |
Notes:
- Set one of TARGET_USERNAME, TARGET_USER_ID, or TARGET_DISPLAY_NAME. If none are set, the first inbound DM will become the target automatically.
- The opener is disabled by default to avoid sending context-specific messages; enable explicitly with AUTO_OPENER_ENABLED and provide OPENER_TEXT.
- Increase delays if you hit Telegram flood limits.
## License

71
main.py
View File

@@ -37,6 +37,18 @@ MAX_MESSAGES_HISTORY = int(os.environ.get("MAX_MESSAGES_HISTORY", "30"))
TARGET_DISPLAY_NAME = os.environ.get("TARGET_DISPLAY_NAME", "").strip()
TARGET_CACHE_FILE = Path(os.environ.get("TARGET_CACHE_FILE", "target_id.txt"))
# Opener controls (disabled by default)
AUTO_OPENER_ENABLED = os.environ.get("AUTO_OPENER_ENABLED", "false").lower() == "true"
OPENER_TEXT = os.environ.get("OPENER_TEXT", "").strip()
# Typing simulation controls
TYPING_SIM_ENABLED = os.environ.get("TYPING_SIM_ENABLED", "true").lower() == "true"
# Approx words per minute to estimate how long to "type"
TYPING_WPM = int(os.environ.get("TYPING_WPM", "22"))
# Clamp typing duration into [min, max] seconds
TYPING_MIN_SEC = float(os.environ.get("TYPING_MIN_SEC", "2.0"))
TYPING_MAX_SEC = float(os.environ.get("TYPING_MAX_SEC", "18.0"))
# ---------- Validation ----------
def _require(cond: bool, msg: str):
if not cond:
@@ -295,14 +307,50 @@ async def startup_catchup_if_needed(client: TelegramClient, target_entity) -> bo
await safe_send(client, target_entity, reply)
return True
async def human_delay():
    """Pause for a random, human-looking interval before reacting."""
    pause = random.randint(MIN_DELAY_SEC, MAX_DELAY_SEC)
    await asyncio.sleep(pause)
def _estimate_typing_seconds(text: str) -> float:
    """
    Estimate how long a human would take to type *text*.

    Uses the configured TYPING_WPM rate, treating roughly 5 characters
    as one word, and clamps the result into [TYPING_MIN_SEC, TYPING_MAX_SEC].
    Empty input yields the minimum duration.
    """
    if not text:
        return TYPING_MIN_SEC
    # Guard against a zero/negative WPM setting to avoid division by zero.
    wpm = max(1, TYPING_WPM)
    word_count = max(1, len(text) / 5.0)
    raw_seconds = word_count * 60.0 / wpm
    if raw_seconds < TYPING_MIN_SEC:
        return TYPING_MIN_SEC
    if raw_seconds > TYPING_MAX_SEC:
        return TYPING_MAX_SEC
    return raw_seconds
async def _simulate_typing(client: TelegramClient, entity, seconds: float):
    """
    Sends 'typing...' chat actions periodically so the peer sees the typing indicator.

    Best-effort: any failure is logged and swallowed so sending is never blocked.
    """
    if seconds <= 0:
        return
    # Telethon's action() context manager auto-refreshes the chat action;
    # sleeping in short chunks keeps the indicator alive for the full span.
    chunk = 3.5
    elapsed = 0.0
    try:
        async with client.action(entity, 'typing'):
            while elapsed < seconds:
                nap = min(chunk, seconds - elapsed)
                await asyncio.sleep(nap)
                elapsed += nap
    except Exception as e:
        # Typing is best-effort; fall back silently
        print(f"Typing simulation warning: {e}")
async def safe_send(client: TelegramClient, entity, text: str):
# Initial human-like pause before reacting at all
await human_delay()
# Show "typing..." before sending the full message
if TYPING_SIM_ENABLED:
seconds = _estimate_typing_seconds(text)
await _simulate_typing(client, entity, seconds)
try:
await client.send_message(entity, text)
except FloodWaitError as e:
@@ -343,14 +391,17 @@ async def main():
if target_entity:
catchup_sent = await startup_catchup_if_needed(client, target_entity)
# Optional: send a gentle opener once (only if history is empty and we didn't just catch up)
if not HISTORY_FILE.exists() and not catchup_sent:
opener = "Oh neat, Houston. I'm up in the Pacific Northwest these days, sort of near the coast. What brought you from the UK to Houston?"
append_history("assistant", opener)
if target_entity:
await safe_send(client, target_entity, opener)
else:
print("Opener queued in history; will start replying when the target speaks.")
# Optional opener: disabled by default. If enabled, only send when:
# - no history file exists (fresh start)
# - no catch-up reply was sent
# - a target is already resolved
# - and OPENER_TEXT is provided
if not HISTORY_FILE.exists() and not catchup_sent and target_entity and AUTO_OPENER_ENABLED and OPENER_TEXT:
append_history("assistant", OPENER_TEXT)
await safe_send(client, target_entity, OPENER_TEXT)
elif not HISTORY_FILE.exists() and not catchup_sent and not AUTO_OPENER_ENABLED:
print("Opener is disabled (AUTO_OPENER_ENABLED=false). Waiting for incoming message.")
@client.on(events.NewMessage(incoming=True))
async def on_msg(event):