Files
batch-bot/bot/ollama.py
bilal a18ad30961 Initial commit: Batch Bot - Telegram Comment Bot
0.0.1
Features:
- Multi-account support via session files
- AI comment generation via Ollama (local LLM)
- Telegram bot for moderation (approve/reject/regenerate)
- Docker support (controller + worker)
- Auto-join public groups
- Comment regeneration on group re-add
- Statistics tracking

Tech stack:
- Python 3.11
- Telethon 1.34 (Telegram user client)
- Aiogram 3.4 (Telegram bot framework)
- SQLite (Database)
- Docker & Docker Compose
- Ollama (Local LLM)
2026-02-24 04:40:07 +03:00

59 lines
2.3 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
import logging
import asyncio
import aiohttp
from bot.config import OLLAMA_URL, OLLAMA_MODEL, PROMPT_FILE
# Module-level logger for all Ollama client operations.
logger = logging.getLogger('ollama')
def load_prompt() -> str:
    """Load the comment-generation prompt template from PROMPT_FILE.

    Returns:
        The stripped file contents. The template is expected to contain a
        ``{text}`` placeholder for the post text. If the file cannot be
        read or decoded, a minimal built-in fallback template is returned
        so the bot keeps working instead of failing at import time.
    """
    try:
        with open(PROMPT_FILE, 'r', encoding='utf-8') as f:
            return f.read().strip()
    # Narrowed from a blanket `except Exception`: only I/O and decoding
    # failures are expected here; anything else should surface loudly.
    except (OSError, UnicodeDecodeError) as e:
        logger.error(f"Ошибка при загрузке промпта: {e}")
        return "Напиши комментарий к следующему посту:\n\n{text}"
# Loaded once at import time; editing the prompt file requires a restart.
PROMPT_TEMPLATE = load_prompt()
async def generate_comment(text: str, max_retries: int = 3) -> str | None:
    """Generate a comment for *text* via the Ollama ``/api/generate`` endpoint.

    Args:
        text: Post text substituted into the prompt template's ``{text}`` slot.
        max_retries: Number of attempts on connection errors or timeouts,
            with exponential backoff (1s, 2s, 4s, ...) between attempts.

    Returns:
        The generated comment with ``<think>``/``</think>`` tags stripped,
        or ``None`` if generation failed.
    """
    prompt = PROMPT_TEMPLATE.format(text=text)
    for attempt in range(max_retries):
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{OLLAMA_URL}/api/generate",
                    json={
                        "model": OLLAMA_MODEL,
                        "prompt": prompt,
                        "stream": False
                    },
                    timeout=aiohttp.ClientTimeout(total=60)
                ) as response:
                    response.raise_for_status()
                    data = await response.json()
                    comment = data.get('response', '')
                    # Strip reasoning-model tags from the raw output.
                    comment = comment.replace('<think>', '').replace('</think>', '').strip()
                    logger.info(f"Комментарий сгенерирован (попытка {attempt + 1})")
                    return comment
        # BUGFIX: a ClientTimeout expiry raises asyncio.TimeoutError, which is
        # NOT a subclass of aiohttp.ClientError — previously timeouts fell
        # through to the generic handler below and aborted all remaining
        # retries. Treat timeouts as retryable connection failures too.
        except (aiohttp.ClientError, asyncio.TimeoutError) as e:
            logger.warning(f"Ошибка подключения к Ollama (попытка {attempt + 1}): {e}")
            if attempt < max_retries - 1:
                await asyncio.sleep(2 ** attempt)  # Exponential backoff
            else:
                logger.error(f"Не удалось подключиться к Ollama после {max_retries} попыток")
        except Exception as e:
            # Unexpected failure (malformed JSON, template error, ...) —
            # retrying would not help, so give up immediately.
            logger.error(f"Ошибка при генерации комментария: {e}")
            return None
    return None