From f9177ebbdb748fbfd9aec9173405de7691a3bf26 Mon Sep 17 00:00:00 2001 From: Cytrogen Date: Fri, 13 Feb 2026 18:59:36 -0500 Subject: [PATCH] =?UTF-8?q?fork:=20=E5=AE=9A=E5=88=B6=E4=BF=AE=E6=94=B9=20?= =?UTF-8?q?+=20=E6=95=8F=E6=84=9F=E6=95=B0=E6=8D=AE=E6=B8=85=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - agent 名改为「澜」,prompt 从外部文件加载 - 添加中文 i18n (I18N/zh/) - docker-compose: 添加 healthcheck、移除 fastapi 服务 - bot/main.py: 添加 UnknownIntent 错误处理 - config.py → config.py.example,排除私人配置 - prompts/*.txt 排除,提供 .sample 模板 - .env.example 格式优化 --- .dockerignore | 48 ++ .env.example | 48 +- .github/workflows/docker-publish.yml | 146 ++-- .github/workflows/python-ci.yml | 186 ++--- .github/workflows/release.yml.disabled | 226 ++--- .gitignore | 75 +- CONTRIBUTING.md | 198 ++--- DEPLOY.md | 149 ++++ Dockerfile | 60 +- Dockerfile_fastapi | 20 +- I18N/en/txt.ftl | 344 ++++---- I18N/factory.py | 50 +- I18N/ru/txt.ftl | 306 +++---- I18N/zh/txt.ftl | 192 +++++ LICENSE | 42 +- README.md | 1056 ++++++++++++------------ bot/agents_tools/agents_.py | 392 +++++---- bot/agents_tools/mcp_servers.py | 118 +-- bot/agents_tools/tools.py | 562 ++++++------- bot/commands.py | 34 +- bot/dialogs/balance.py | 396 ++++----- bot/dialogs/i18n_widget.py | 36 +- bot/dialogs/knowledge.py | 310 +++---- bot/dialogs/menu.py | 220 ++--- bot/dialogs/settings.py | 146 ++-- bot/dialogs/wallet.py | 378 ++++----- bot/keyboards/inline.py | 64 +- bot/main.py | 182 ++-- bot/middlewares/database_session.py | 50 +- bot/middlewares/first_time.py | 60 +- bot/middlewares/translator_hub.py | 90 +- bot/routers/admin.py | 90 +- bot/routers/user.py | 474 +++++------ bot/scheduler_funcs/daily_tokens.py | 22 +- bot/states/states.py | 76 +- bot/utils/agent_requests.py | 352 ++++---- bot/utils/calculate_tokens.py | 34 +- bot/utils/check_burn_address.py | 104 +-- bot/utils/check_payment.py | 124 +-- bot/utils/create_bot.py | 28 +- bot/utils/executed_tasks.py | 88 +- 
bot/utils/funcs_gpt.py | 312 +++---- bot/utils/get_ton_course.py | 50 +- bot/utils/scheduler_provider.py | 26 +- bot/utils/send_answer.py | 721 ++++++++-------- bot/utils/solana_funcs.py | 54 +- config.py => config.py.example | 12 +- database/models.py | 296 +++---- database/repositories/user.py | 246 +++--- database/repositories/utils.py | 144 ++-- docker-compose.yml | 115 +-- docker_setup_en.sh | 94 +-- modes/crypto_mode/agents_.py | 544 ++++++------ modes/crypto_mode/en/txt.ftl | 306 +++---- modes/crypto_mode/ru/txt.ftl | 306 +++---- prompts/main_prompt.txt.sample | 5 + redis_service/connect.py | 14 +- requirements.txt | 156 ++-- requirements_fastapi.txt | 138 ++-- 59 files changed, 5781 insertions(+), 5334 deletions(-) create mode 100644 .dockerignore create mode 100644 DEPLOY.md create mode 100644 I18N/zh/txt.ftl rename config.py => config.py.example (81%) create mode 100644 prompts/main_prompt.txt.sample diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..7fe23d3 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,48 @@ +# Version control +.git +.gitignore + +# Python +__pycache__ +*.py[cod] +*$py.class +venv/ +env/ +.venv/ +*.egg-info/ + +# Docker +docker-compose.yml +Dockerfile +Dockerfile_fastapi +.dockerignore + +# Data volumes (mounted at runtime) +data/ + +# IDE / Editor +.idea/ +.vscode/ +*.swp +*.swo + +# Docs & misc +*.md +LICENSE +docs/ + +# OS files +.DS_Store +__MACOSX/ + +# Tests +tests/ +.pytest_cache/ +.coverage + +# CI/CD +.github/ +.claude/ + +# Shell scripts (not needed in container) +*.sh diff --git a/.env.example b/.env.example index c8159c2..00ca010 100644 --- a/.env.example +++ b/.env.example @@ -1,41 +1,45 @@ # ============================================================================= -# REQUIRED CONFIGURATION - Must be filled before running the bot +# Environment Variables for evi-run +# Copy this file to .env and fill in your values: +# cp .env.example .env # 
============================================================================= -# REQUIRED! Enter your Telegram bot token (get from @BotFather) +# --- Required: Telegram Bot --- +# Get your token from @BotFather on Telegram TELEGRAM_BOT_TOKEN=XXX -# REQUIRED! Enter the OpenAI API key (get from https://platform.openai.com/api-keys) +# --- Required: OpenAI API --- +# Get your key from https://platform.openai.com/api-keys API_KEY_OPENAI=XXX -# ============================================================================= -# DATABASE CONFIGURATION - Default settings, no changes needed -# ============================================================================= - -# PostgreSQL database connection settings -DATABASE_URL=postgresql+psycopg://user:password@postgres_agent_db:5432/agent_ai_bot +# --- Required: PostgreSQL --- POSTGRES_USER=user POSTGRES_PASSWORD=password POSTGRES_DB=agent_ai_bot -# Redis cache connection settings -REDIS_URL=redis://redis_agent:6379/0 +# Database URL — uses the Docker Compose service name "postgres" as host. +# Make sure USER, PASSWORD, and DB match the values above. +DATABASE_URL=postgresql+psycopg://user:password@postgres:5432/agent_ai_bot + +# --- Required: Redis --- +# Uses the Docker Compose service name "redis" as host. 
+REDIS_URL=redis://redis:6379/0 # ============================================================================= -# PAY MODE CONFIGURATION - Only required if using monetized features +# PAY MODE CONFIGURATION — Only required if TYPE_USAGE = 'pay' in config.py # ============================================================================= -# Solana wallet address for token burning operations -TOKEN_BURN_ADDRESS=XXX - -# Solana token contract address for user balance top-ups -MINT_TOKEN_ADDRESS=XXX +# TON blockchain API key (get from https://tonapi.io) +API_KEY_TON=XXX -# TON blockchain address for receiving payments +# TON wallet address for receiving payments TON_ADDRESS=XXX -# TONAPI service key for blockchain operations (get from https://tonapi.io) -API_KEY_TON=XXX - -# Solana address for receiving payments +# Solana wallet address for receiving payments ADDRESS_SOL=XXX + +# Solana token contract address for user balance top-ups +MINT_TOKEN_ADDRESS=XXX + +# Solana wallet address for token burning operations +TOKEN_BURN_ADDRESS=XXX diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 9ebaca2..e5a0001 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -1,73 +1,73 @@ -name: Docker Build & Publish - -on: - push: - branches: [ main ] - tags: [ 'v*' ] - pull_request: - branches: [ main ] - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} - -jobs: - build-and-push: - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Log in to Container Registry - if: github.event_name != 'pull_request' - uses: docker/login-action@v3 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - tags: | 
- type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - - - name: Build and push Docker image - uses: docker/build-push-action@v5 - with: - context: . - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - - docker-compose-test: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Create test .env file - run: | - cp .env.example .env - # Replace with dummy values for testing - sed -i 's/XXX/test_token_123/g' .env - - - name: Test Docker Compose build - run: | - docker compose build --no-cache - echo "Docker Compose build successful!" - - - name: Test Docker Compose services - run: | - # Test if services can start (without actually running them) - docker compose config --quiet - echo "Docker Compose configuration is valid!" +name: Docker Build & Publish + +on: + push: + branches: [ main ] + tags: [ 'v*' ] + pull_request: + branches: [ main ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + build-and-push: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Container Registry + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: . 
+ push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + docker-compose-test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Create test .env file + run: | + cp .env.example .env + # Replace with dummy values for testing + sed -i 's/XXX/test_token_123/g' .env + + - name: Test Docker Compose build + run: | + docker compose build --no-cache + echo "Docker Compose build successful!" + + - name: Test Docker Compose services + run: | + # Test if services can start (without actually running them) + docker compose config --quiet + echo "Docker Compose configuration is valid!" diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml index 23b0e91..72332c2 100644 --- a/.github/workflows/python-ci.yml +++ b/.github/workflows/python-ci.yml @@ -1,93 +1,93 @@ -name: Python CI - -on: - push: - branches: [ main, develop ] - pull_request: - branches: [ main, develop ] - -jobs: - test: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.10", "3.11"] - - steps: - - uses: actions/checkout@v4 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Cache pip dependencies - uses: actions/cache@v3 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 black isort - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - # Отключено форматирование кода для первого релиза - # - name: Code formatting check with Black - # run: | - # black --check --diff . - # - # - name: Import sorting check with isort - # run: | - # isort --check-only --diff . 
- - - name: Lint with flake8 - run: | - # Stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - # Exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - - name: Check for security issues with bandit - run: | - pip install bandit - bandit -r . -f json || true - - docker-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Build Docker image - run: | - docker build -t evi-run-test . - - - name: Test Docker container - run: | - # Test if container builds successfully - docker run --rm evi-run-test python --version - echo "Docker build test passed!" - - dependency-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install safety pip-audit - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - - name: Check for security vulnerabilities - run: | - safety check || true - pip-audit || true +name: Python CI + +on: + push: + branches: [ main, develop ] + pull_request: + branches: [ main, develop ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10", "3.11"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Cache pip dependencies + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 black isort + if [ -f requirements.txt ]; then pip install -r 
requirements.txt; fi + + # Отключено форматирование кода для первого релиза + # - name: Code formatting check with Black + # run: | + # black --check --diff . + # + # - name: Import sorting check with isort + # run: | + # isort --check-only --diff . + + - name: Lint with flake8 + run: | + # Stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # Exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + + - name: Check for security issues with bandit + run: | + pip install bandit + bandit -r . -f json || true + + docker-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Build Docker image + run: | + docker build -t evi-run-test . + + - name: Test Docker container + run: | + # Test if container builds successfully + docker run --rm evi-run-test python --version + echo "Docker build test passed!" 
+ + dependency-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install safety pip-audit + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + + - name: Check for security vulnerabilities + run: | + safety check || true + pip-audit || true diff --git a/.github/workflows/release.yml.disabled b/.github/workflows/release.yml.disabled index fd797d8..9a16c1f 100644 --- a/.github/workflows/release.yml.disabled +++ b/.github/workflows/release.yml.disabled @@ -1,113 +1,113 @@ -name: Release - -on: - push: - tags: - - 'v*' - -jobs: - create-release: - runs-on: ubuntu-latest - permissions: - contents: write - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Generate changelog - id: changelog - run: | - # Get the latest two tags - CURRENT_TAG=${GITHUB_REF#refs/tags/} - PREVIOUS_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "") - - echo "current_tag=$CURRENT_TAG" >> $GITHUB_OUTPUT - - # Generate changelog - if [ -n "$PREVIOUS_TAG" ]; then - echo "## What's Changed" > CHANGELOG.tmp - git log --pretty=format:"* %s (%h)" $PREVIOUS_TAG..$CURRENT_TAG >> CHANGELOG.tmp - else - echo "## What's Changed" > CHANGELOG.tmp - echo "* Initial release" >> CHANGELOG.tmp - fi - - # Read changelog into output - { - echo 'CHANGELOG<> $GITHUB_OUTPUT - - - name: Create Release - uses: softprops/action-gh-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ steps.changelog.outputs.current_tag }} - name: evi.run ${{ steps.changelog.outputs.current_tag }} - body: | - 🚀 **evi.run Release ${{ steps.changelog.outputs.current_tag }}** - - ${{ steps.changelog.outputs.CHANGELOG }} - - ## 📦 Installation - - ```bash - # Quick install with Docker - git clone https://github.com/${{ github.repository 
}}.git - cd evi-run - cp .env.example .env - # Edit .env with your credentials - chmod +x docker_setup_en.sh - ./docker_setup_en.sh - docker compose up --build -d - ``` - - ## 🔗 Useful Links - - 📚 [Documentation](https://github.com/${{ github.repository }}/blob/main/README.md) - - 🤝 [Contributing](https://github.com/${{ github.repository }}/blob/main/CONTRIBUTING.md) - - 💬 [Support](https://t.me/playa3000) - - **Full Changelog**: https://github.com/${{ github.repository }}/compare/${{ steps.changelog.outputs.previous_tag }}...${{ steps.changelog.outputs.current_tag }} - draft: false - prerelease: false - - docker-release: - needs: create-release - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Log in to Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract version from tag - id: version - run: echo "version=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT - - - name: Build and push release image - uses: docker/build-push-action@v5 - with: - context: . 
- push: true - tags: | - ghcr.io/${{ github.repository }}:latest - ghcr.io/${{ github.repository }}:${{ steps.version.outputs.version }} - labels: | - org.opencontainers.image.title=evi.run - org.opencontainers.image.description=Customizable Multi-Agent AI System - org.opencontainers.image.version=${{ steps.version.outputs.version }} - org.opencontainers.image.source=https://github.com/${{ github.repository }} +name: Release + +on: + push: + tags: + - 'v*' + +jobs: + create-release: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Generate changelog + id: changelog + run: | + # Get the latest two tags + CURRENT_TAG=${GITHUB_REF#refs/tags/} + PREVIOUS_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "") + + echo "current_tag=$CURRENT_TAG" >> $GITHUB_OUTPUT + + # Generate changelog + if [ -n "$PREVIOUS_TAG" ]; then + echo "## What's Changed" > CHANGELOG.tmp + git log --pretty=format:"* %s (%h)" $PREVIOUS_TAG..$CURRENT_TAG >> CHANGELOG.tmp + else + echo "## What's Changed" > CHANGELOG.tmp + echo "* Initial release" >> CHANGELOG.tmp + fi + + # Read changelog into output + { + echo 'CHANGELOG<> $GITHUB_OUTPUT + + - name: Create Release + uses: softprops/action-gh-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.changelog.outputs.current_tag }} + name: evi.run ${{ steps.changelog.outputs.current_tag }} + body: | + 🚀 **evi.run Release ${{ steps.changelog.outputs.current_tag }}** + + ${{ steps.changelog.outputs.CHANGELOG }} + + ## 📦 Installation + + ```bash + # Quick install with Docker + git clone https://github.com/${{ github.repository }}.git + cd evi-run + cp .env.example .env + # Edit .env with your credentials + chmod +x docker_setup_en.sh + ./docker_setup_en.sh + docker compose up --build -d + ``` + + ## 🔗 Useful Links + - 📚 [Documentation](https://github.com/${{ github.repository 
}}/blob/main/README.md) + - 🤝 [Contributing](https://github.com/${{ github.repository }}/blob/main/CONTRIBUTING.md) + - 💬 [Support](https://t.me/playa3000) + + **Full Changelog**: https://github.com/${{ github.repository }}/compare/${{ steps.changelog.outputs.previous_tag }}...${{ steps.changelog.outputs.current_tag }} + draft: false + prerelease: false + + docker-release: + needs: create-release + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract version from tag + id: version + run: echo "version=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT + + - name: Build and push release image + uses: docker/build-push-action@v5 + with: + context: . + push: true + tags: | + ghcr.io/${{ github.repository }}:latest + ghcr.io/${{ github.repository }}:${{ steps.version.outputs.version }} + labels: | + org.opencontainers.image.title=evi.run + org.opencontainers.image.description=Customizable Multi-Agent AI System + org.opencontainers.image.version=${{ steps.version.outputs.version }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} diff --git a/.gitignore b/.gitignore index 135a07e..56b2399 100644 --- a/.gitignore +++ b/.gitignore @@ -1,34 +1,41 @@ -# Virtual environments -venv/ -env/ -.venv/ -docs/.venv/ -site/ -docs/site/ -.cache/ -docs/.cache/ -docs/ - -# Python cache -__pycache__/ -*.py[cod] -*$py.class - -# Environment and configuration files -.env -mkdocs.yml - -# Logs -*.log - -# Temporary files -.DS_Store -__MACOSX/ - -# Local tests (excluded from VCS as requested) -tests/ - -# Pytest artifacts -.pytest_cache/ -.coverage -pytest.ini +# Virtual environments +venv/ +env/ +.venv/ +docs/.venv/ +site/ +docs/site/ +.cache/ +docs/.cache/ +docs/ + +# Python cache 
+__pycache__/ +*.py[cod] +*$py.class + +# Environment and configuration files +.env +config.py +mkdocs.yml + +# Custom prompts (use *.sample as template) +prompts/*.txt + +# Runtime data +data/ + +# Logs +*.log + +# Temporary files +.DS_Store +__MACOSX/ + +# Local tests (excluded from VCS as requested) +tests/ + +# Pytest artifacts +.pytest_cache/ +.coverage +pytest.ini diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 799d19e..5284fa5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,99 +1,99 @@ -# Contributing to evi-run - -Thank you for your interest in contributing to **evi-run**! We welcome contributions from the community. - -## 🚀 Quick Start - -1. **Fork the repository** on GitHub -2. **Clone your fork** locally: - ```bash - git clone https://github.com/pipedude/evi-run.git - cd evi-run - ``` -3. **Create a feature branch**: - ```bash - git checkout -b feature/your-feature-name - ``` -4. **Make your changes** and test them -5. **Commit your changes**: - ```bash - git commit -m "feat: add your feature description" - ``` -6. **Push to your fork**: - ```bash - git push origin feature/your-feature-name - ``` -7. 
**Create a Pull Request** on GitHub - -## 🛠️ Development Setup - -### Prerequisites -- Python 3.9+ -- Docker & Docker Compose -- Git - -### Local Development -```bash -# Clone and setup -git clone https://github.com/pipedude/evi-run.git -cd evi-run - -# Configure environment -cp .env.example .env -nano .env # Add your API keys -nano config.py # Set your Telegram ID - -# Start development environment -docker compose up --build -``` - -## 📋 Types of Contributions - -- **🐛 Bug Reports**: Create detailed issue reports -- **✨ Features**: Propose and implement new features -- **📚 Documentation**: Improve README or code comments -- **🧪 Tests**: Add or improve test coverage -- **🌐 Localization**: Add translations in `I18N/` -- **🤖 Custom Agents**: Add new AI agents in `bot/agents_tools/` - -## 🎯 Guidelines - -### Code Style -- Follow **PEP 8** for Python -- Use meaningful variable names -- Add docstrings for functions -- Keep functions focused and small - -### Commit Messages -Use conventional commit format: -``` -feat(agents): add new trading agent -fix(database): resolve connection issue -docs(readme): update installation guide -style: format code according to PEP 8 -``` - -### Testing -```bash -# Run tests -python -m pytest - -# Run with coverage -python -m pytest --cov=bot -``` - -## 🔒 Security - -- **Never commit API keys or secrets** -- Use environment variables for sensitive data -- Report security issues privately to project maintainers - -## 📞 Support - -- **Issues**: [GitHub Issues](https://github.com/pipedude/evi-run/issues) -- **Telegram**: [@playa3000](https://t.me/playa3000) -- **Community**: [Telegram Support Group](https://t.me/evi_run) - ---- - -**Thank you for contributing to evi-run! 🚀** +# Contributing to evi-run + +Thank you for your interest in contributing to **evi-run**! We welcome contributions from the community. + +## 🚀 Quick Start + +1. **Fork the repository** on GitHub +2. 
**Clone your fork** locally: + ```bash + git clone https://github.com/pipedude/evi-run.git + cd evi-run + ``` +3. **Create a feature branch**: + ```bash + git checkout -b feature/your-feature-name + ``` +4. **Make your changes** and test them +5. **Commit your changes**: + ```bash + git commit -m "feat: add your feature description" + ``` +6. **Push to your fork**: + ```bash + git push origin feature/your-feature-name + ``` +7. **Create a Pull Request** on GitHub + +## 🛠️ Development Setup + +### Prerequisites +- Python 3.9+ +- Docker & Docker Compose +- Git + +### Local Development +```bash +# Clone and setup +git clone https://github.com/pipedude/evi-run.git +cd evi-run + +# Configure environment +cp .env.example .env +nano .env # Add your API keys +nano config.py # Set your Telegram ID + +# Start development environment +docker compose up --build +``` + +## 📋 Types of Contributions + +- **🐛 Bug Reports**: Create detailed issue reports +- **✨ Features**: Propose and implement new features +- **📚 Documentation**: Improve README or code comments +- **🧪 Tests**: Add or improve test coverage +- **🌐 Localization**: Add translations in `I18N/` +- **🤖 Custom Agents**: Add new AI agents in `bot/agents_tools/` + +## 🎯 Guidelines + +### Code Style +- Follow **PEP 8** for Python +- Use meaningful variable names +- Add docstrings for functions +- Keep functions focused and small + +### Commit Messages +Use conventional commit format: +``` +feat(agents): add new trading agent +fix(database): resolve connection issue +docs(readme): update installation guide +style: format code according to PEP 8 +``` + +### Testing +```bash +# Run tests +python -m pytest + +# Run with coverage +python -m pytest --cov=bot +``` + +## 🔒 Security + +- **Never commit API keys or secrets** +- Use environment variables for sensitive data +- Report security issues privately to project maintainers + +## 📞 Support + +- **Issues**: [GitHub Issues](https://github.com/pipedude/evi-run/issues) +- 
**Telegram**: [@playa3000](https://t.me/playa3000) +- **Community**: [Telegram Support Group](https://t.me/evi_run) + +--- + +**Thank you for contributing to evi-run! 🚀** diff --git a/DEPLOY.md b/DEPLOY.md new file mode 100644 index 0000000..776a049 --- /dev/null +++ b/DEPLOY.md @@ -0,0 +1,149 @@ +# Local Deployment Guide (Docker Compose) + +## Prerequisites + +| Requirement | Notes | +|-------------|-------| +| **Docker Engine + Docker Compose v2** | Windows: install [Docker Desktop](https://www.docker.com/products/docker-desktop/) | +| **Git** | To clone the repository | +| **Hardware** | 2 CPU cores, 2 GB RAM, 10 GB disk (minimum) | + +## Required API Keys + +| Variable | Source | Required? | +|----------|--------|-----------| +| `TELEGRAM_BOT_TOKEN` | Create a bot via [@BotFather](https://t.me/BotFather) | **Yes** | +| `API_KEY_OPENAI` | [OpenAI Platform](https://platform.openai.com/api-keys) | **Yes** | +| `POSTGRES_USER` | Choose any username | **Yes** | +| `POSTGRES_PASSWORD` | Choose a strong password | **Yes** | +| `POSTGRES_DB` | Choose a database name | **Yes** | +| `DATABASE_URL` | Built from the PG values above | **Yes** | +| `REDIS_URL` | `redis://redis:6379` | **Yes** | +| `API_KEY_TON` | TON API key | Optional (pay mode) | +| `TON_ADDRESS` | TON wallet address | Optional (pay mode) | +| `ADDRESS_SOL` | Solana wallet public key | Optional (token swap) | +| `MINT_TOKEN_ADDRESS` | SPL token mint address | Optional (pay mode) | +| `TOKEN_BURN_ADDRESS` | Token burn address | Optional (pay mode) | + +## Step-by-step Deployment + +### 1. Clone the repository + +```bash +git clone +cd evi-run +``` + +### 2. Create and configure `.env` + +```bash +cp .env.example .env +``` + +Edit `.env` and fill in **all required values**. 
Pay attention to `DATABASE_URL` — it must use the **Docker service name** `postgres` as the host (not `localhost`): + +``` +DATABASE_URL=postgresql+psycopg://evi_user:YOUR_PASSWORD@postgres:5432/evi_db +REDIS_URL=redis://redis:6379 +``` + +> **Important:** Use the container-internal ports (`5432` for PostgreSQL, `6379` for Redis), not the host-mapped ports (`5434`, `6380`). + +### 3. Configure `config.py` + +Edit `config.py` and set your Telegram user ID: + +```python +ADMIN_ID = 123456789 # Your Telegram ID +ADMINS_LIST = [123456789] # List of admin Telegram IDs +TYPE_USAGE = 'private' # 'private', 'free', or 'pay' +``` + +To find your Telegram ID, message [@userinfobot](https://t.me/userinfobot). + +### 4. Build and start + +```bash +docker compose up --build -d +``` + +### 5. Verify + +```bash +# Check all containers are running +docker compose ps + +# Watch bot logs +docker compose logs -f bot + +# Check database is healthy +docker compose logs postgres +``` + +## Architecture + +The deployment runs 3 containers (4 if the optional payment module is enabled): + +``` + ┌─────────────┐ + │ bot_agent │ + │ (Telegram) │ + └──────┬───┬──┘ + │ │ + ┌───────────┘ └───────────┐ + ▼ ▼ + ┌────────────────┐ ┌──────────────────┐ + │ postgres_agent │ │ redis_agent │ + │ (PostgreSQL) │ │ (Redis) │ + │ :5434 → :5432 │ │ :6380 → :6379 │ + └────────────────┘ └──────────────────┘ +``` + +Data persistence: +- `./data/postgres/postgres_data` — PostgreSQL data +- `./data/redis/data` — Redis data +- `./data/images` — Generated images + +## Enabling the Payment Module (Optional) + +The `fastapi` service is commented out by default because the `payment_module/` directory is not included in the repository. If you have the payment module: + +1. Place the `payment_module/` directory in the project root +2. Uncomment the `fastapi` service block in `docker-compose.yml` +3. Add `fastapi` to the bot's `depends_on` section +4. 
Rebuild: `docker compose up --build -d` + +## Troubleshooting + +### Bot container keeps restarting +```bash +docker compose logs bot +``` +Common causes: +- Invalid `TELEGRAM_BOT_TOKEN` +- Invalid `API_KEY_OPENAI` +- Database not ready (healthcheck should prevent this, but check `docker compose logs postgres`) + +### Database connection errors +- Verify `DATABASE_URL` in `.env` uses `postgres` as host (not `localhost`) +- Verify `POSTGRES_USER`, `POSTGRES_PASSWORD`, and `POSTGRES_DB` match between the `.env` values and `DATABASE_URL` + +### Redis connection errors +- Verify `REDIS_URL` uses `redis` as host (not `localhost`) + +### Rebuilding after `.env` changes +Environment variables are injected via `env_file` in `docker-compose.yml`, so a simple restart should pick up changes: +```bash +docker compose down && docker compose up -d +``` +However, if you change dependencies or code, rebuild: +```bash +docker compose up --build -d +``` + +### Resetting everything +```bash +docker compose down -v +rm -rf data/postgres data/redis +docker compose up --build -d +``` diff --git a/Dockerfile b/Dockerfile index fa90456..5d62774 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,31 +1,31 @@ -FROM python:3.11-slim - -RUN apt-get update \ - && apt-get install -y --no-install-recommends build-essential gcc \ - && rm -rf /var/lib/apt/lists/* - -# --- system deps --- -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - curl gnupg git && \ - curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \ - apt-get install -y --no-install-recommends nodejs && \ - rm -rf /var/lib/apt/lists/* - -# --- Jupiter-MCP --- -RUN npm install -g github:pipedude/jupiter-mcp - -RUN npm install -g dexpaprika-mcp - -WORKDIR /app - -COPY requirements.txt . - -RUN pip install --no-cache-dir -r requirements.txt - -COPY . . 
- -#COPY ./index.js /usr/lib/node_modules/jupiter-mcp/index.js - -# Specify the command that will be executed when the container is started +FROM python:3.11-slim + +RUN apt-get update \ + && apt-get install -y --no-install-recommends build-essential gcc \ + && rm -rf /var/lib/apt/lists/* + +# --- system deps --- +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl gnupg git && \ + curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \ + apt-get install -y --no-install-recommends nodejs && \ + rm -rf /var/lib/apt/lists/* + +# --- Jupiter-MCP --- +RUN npm install -g github:pipedude/jupiter-mcp + +RUN npm install -g dexpaprika-mcp + +WORKDIR /app + +COPY requirements.txt . + +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +#COPY ./index.js /usr/lib/node_modules/jupiter-mcp/index.js + +# Specify the command that will be executed when the container is started CMD ["python", "-u", "-m", "bot.main"] \ No newline at end of file diff --git a/Dockerfile_fastapi b/Dockerfile_fastapi index 56e5912..07ce078 100644 --- a/Dockerfile_fastapi +++ b/Dockerfile_fastapi @@ -1,11 +1,11 @@ -FROM python:3.11-slim - -WORKDIR /app - -COPY requirements_fastapi.txt . - -RUN pip install --no-cache-dir -r requirements_fastapi.txt - -COPY . . - +FROM python:3.11-slim + +WORKDIR /app + +COPY requirements_fastapi.txt . + +RUN pip install --no-cache-dir -r requirements_fastapi.txt + +COPY . . + CMD ["uvicorn", "payment_module.app:app", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/I18N/en/txt.ftl b/I18N/en/txt.ftl index 5fcec44..c48ff35 100644 --- a/I18N/en/txt.ftl +++ b/I18N/en/txt.ftl @@ -1,154 +1,192 @@ -start_text = - I am Evi — a virtual technomage and your AI agent... 
🦄👻 - - My capabilities include (but are not limited to): - - solving complex, multi-step tasks - - conducting deep research - - intelligent web search - - document and image analysis - - image generation - - DEX analytics and Solana token swap - - task scheduler - - memory management - - Simply write your requests in the chat using natural language or send voice messages to start interacting! 🔮✨ - - ⚠️ Tip! Periodically reset the conversation context with the /new command — this will help save tokens and speed up request processing. - -close_kb = Close - -command_new_text = Confirm starting a new dialog without saving the current one. It is recommended to complete the current task before starting a new dialog! After deletion, the current history will be erased and the context will be reset. - -command_approve_new_text = Current dialog deleted! - -command_new_approve_kb = Confirm - -command_new_save_kb = Save dialog - -command_save_text = Confirm saving the current dialog to memory. It is recommended to complete the current task before saving! After saving, a new dialog will start with reset context, but key moments from the current conversation will remain in the system memory. - -command_save_approve_kb = Current dialog saved to system memory! - -command_delete_text = Confirm deletion of the current dialog and all system memory about you. - -command_delete_approve_text = System memory about you and dialog deleted! Start a new dialog. - -token_price_error_text = Wrong format, example: 0.01 - -not_token_price_error_text = You haven't set the token price yet! - -token_price_updated_text = Token price updated! - -command_wallet_text = - If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. - - Warning: use a separate wallet with a small balance, as the trading agent works in test mode! - -cmd_help_text = - Interact with the system through the chat window. 
All functions are available through regular messages. Use Menu for additional parameter settings. - - For advanced system and character customization, edit the file: \bot\agents_tools\agents_ - -command_settings_text = Settings: - -settings_language_text = Interface language - -text_choose_lang = Choose interface language: - -back_kb = Back - -text_document_upload = File successfully uploaded! You can ask a question. - -command_knowledge_text = This is the system's general knowledge base. Added information will be available to all users (when using modes: free and pay)! Do you want to add files or clear the knowledge base? - -command_knowledge_add_kb = Add information - -command_knowledge_delete_kb = Clear knowledge base - -command_knowledge_add_text = Send a text file with information to the chat! Add only one file at a time to the chat! - -text_not_format_file = Wrong format, please try again! Supported document formats: .pdf, .txt, .md, .doc(x), .pptx and .py - -text_approve_file = File successfully uploaded! You can ask a question. - -command_knowledge_delete_text = Confirm deletion of the knowledge base. - -text_approve_delete = Knowledge base deleted. Add new information to the knowledge base. - -warning_save_context_txt = Error saving context to txt file! - -warning_text_no_credits = Insufficient credits! - -wait_answer_text = One moment ✨ - -answer_md = Download answer - -warning_text_tokens = Dialog size exceeds 15,000 tokens! To save resources, you can save the dialog to system memory or delete it through the menu after completing the current task. - -warning_text_format = Wrong format! - -warning_text_error = An error occurred! - -cmd_wallet_text_start = - If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. - - Warning: use a separate wallet with a small balance, as the trading agent works in test mode! 
- -wallet_balance_kb = Wallet balance - -wallet_delete_key = Delete private key - -not_format_wallet_key = Wrong format! Write the Solana wallet private key in format [45, 456, …]. - -text_after_add_key = - Agent gained access to the wallet. - - Wallet balance: - - -wallet_delete_key_text = Confirm deletion of the private key. - -command_delete_key_approve_text = Private key deleted. Link a new wallet to use the trading agent. - -text_balance_wallet = Wallet balance: - -cmd_wallet_text = Your balance: - -add_balance_kb = Top up balance - -text_add_balance = Enter payment amount in $, whole number not less than $1. - -text_add_balance_error = Please try again! Enter payment amount in $, whole number not less than $1. - -choose_type_pay_text = Choose top-up method: - -ton_type_kb = TON - -sol_type_kb = Token (Solana) - -error_create_payment = An error occurred while creating the payment. Please try later. - -check_payment_kb = Check payment - -text_payment_create = - Make a payment for: { $sum } - Wallet: { $wallet } - -text_payment_create_sol = - Make a payment for: { $sum } tokens - Wallet: { $wallet } - Token address: { $token } - -error_get_token_price = Token price not specified. Please specify token price /token_price. - -wait_check_payment_text = Checking payment ⏳ - -check_payment_success_text = Payment completed successfully! - -check_payment_error_text = Payment was not completed! Please try later. - -warning_text_no_row_md = Context was deleted. Row not found in database. - -text_user_upload_file = The user uploaded the { $filename } file to the tool search_conversation_memory - +start_text = + I am Evi — a virtual technomage and your AI agent... 
🦄👻 + + My capabilities include (but are not limited to): + - solving complex, multi-step tasks + - conducting deep research + - intelligent web search + - document and image analysis + - image generation + - DEX analytics and Solana token swap + - task scheduler + - memory management + + Simply write your requests in the chat using natural language or send voice messages to start interacting! 🔮✨ + + ⚠️ Tip! Periodically reset the conversation context with the /new command — this will help save tokens and speed up request processing. + +close_kb = Close + +command_new_text = Confirm starting a new dialog without saving the current one. It is recommended to complete the current task before starting a new dialog! After deletion, the current history will be erased and the context will be reset. + +command_approve_new_text = Current dialog deleted! + +command_new_approve_kb = Confirm + +command_new_save_kb = Save dialog + +command_save_text = Confirm saving the current dialog to memory. It is recommended to complete the current task before saving! After saving, a new dialog will start with reset context, but key moments from the current conversation will remain in the system memory. + +command_save_approve_kb = Current dialog saved to system memory! + +command_delete_text = Confirm deletion of the current dialog and all system memory about you. + +command_delete_approve_text = System memory about you and dialog deleted! Start a new dialog. + +token_price_error_text = Wrong format, example: 0.01 + +not_token_price_error_text = You haven't set the token price yet! + +token_price_updated_text = Token price updated! + +command_wallet_text = + If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. + + Warning: use a separate wallet with a small balance, as the trading agent works in test mode! 
+ +cmd_help_text = + 📋 Commands + + /start — View bot introduction and capabilities + /help — Show this help message + /new — Start a new chat (clears current chat without saving) + /save — Save current chat to system memory and start a new one + /delete — Delete all chat history and system memory + /balance — View balance and top up + /settings — Set interface language + + 💬 How to Interact + + Text — Type your questions or instructions directly in the chat + Voice — Send a voice message, it will be transcribed automatically + Images — Send an image for analysis (charts, screenshots, etc.) + Files — Send documents for analysis. Supported: .pdf .txt .md .doc(x) .pptx .py + + 🔧 Core Features + + • Complex multi-step task processing + • Deep research and analysis + • Intelligent web search + • Document and image analysis + • Image generation + • DEX analytics and Solana token swap + • Task scheduler (one-time / recurring tasks) + • Conversation memory management + + 💡 Examples + + Analyze the architecture of this project + Search for the latest Solana ecosystem news + Generate a cyberpunk-style city image + Remind me to check the market every day at 9 AM + + ⚙️ /save vs /new + + /save — Saves key moments from the current chat to long-term memory, then starts a new chat. Use after completing a task so the agent remembers important details. + /new — Clears the current chat without saving anything. Use for a quick reset when the conversation content is not important. + + ⚠️ Periodically use /new or /save to reset the context — this helps save tokens and speeds up response processing. + +command_settings_text = Settings: + +settings_language_text = Interface language + +text_choose_lang = Choose interface language: + +back_kb = Back + +text_document_upload = File successfully uploaded! You can ask a question. + +command_knowledge_text = This is the system's general knowledge base. Added information will be available to all users (when using modes: free and pay)! 
Do you want to add files or clear the knowledge base? + +command_knowledge_add_kb = Add information + +command_knowledge_delete_kb = Clear knowledge base + +command_knowledge_add_text = Send a text file with information to the chat! Add only one file at a time to the chat! + +text_not_format_file = Wrong format, please try again! Supported document formats: .pdf, .txt, .md, .doc(x), .pptx and .py + +text_approve_file = File successfully uploaded! You can ask a question. + +command_knowledge_delete_text = Confirm deletion of the knowledge base. + +text_approve_delete = Knowledge base deleted. Add new information to the knowledge base. + +warning_save_context_txt = Error saving context to txt file! + +warning_text_no_credits = Insufficient credits! + +wait_answer_text = One moment ✨ + +answer_md = Download answer + +warning_text_tokens = Dialog size exceeds 15,000 tokens! To save resources, you can save the dialog to system memory or delete it through the menu after completing the current task. + +warning_text_format = Wrong format! + +warning_text_error = An error occurred! + +cmd_wallet_text_start = + If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. + + Warning: use a separate wallet with a small balance, as the trading agent works in test mode! + +wallet_balance_kb = Wallet balance + +wallet_delete_key = Delete private key + +not_format_wallet_key = Wrong format! Write the Solana wallet private key in format [45, 456, …]. + +text_after_add_key = + Agent gained access to the wallet. + + Wallet balance: + + +wallet_delete_key_text = Confirm deletion of the private key. + +command_delete_key_approve_text = Private key deleted. Link a new wallet to use the trading agent. + +text_balance_wallet = Wallet balance: + +cmd_wallet_text = Your balance: + +add_balance_kb = Top up balance + +text_add_balance = Enter payment amount in $, whole number not less than $1. 
+ +text_add_balance_error = Please try again! Enter payment amount in $, whole number not less than $1. + +choose_type_pay_text = Choose top-up method: + +ton_type_kb = TON + +sol_type_kb = Token (Solana) + +error_create_payment = An error occurred while creating the payment. Please try later. + +check_payment_kb = Check payment + +text_payment_create = + Make a payment for: { $sum } + Wallet: { $wallet } + +text_payment_create_sol = + Make a payment for: { $sum } tokens + Wallet: { $wallet } + Token address: { $token } + +error_get_token_price = Token price not specified. Please specify token price /token_price. + +wait_check_payment_text = Checking payment ⏳ + +check_payment_success_text = Payment completed successfully! + +check_payment_error_text = Payment was not completed! Please try later. + +warning_text_no_row_md = Context was deleted. Row not found in database. + +text_user_upload_file = The user uploaded the { $filename } file to the tool search_conversation_memory + wait_answer_text_scheduler = Executing the scheduler's request ✨ \ No newline at end of file diff --git a/I18N/factory.py b/I18N/factory.py index 8834c09..7167f07 100644 --- a/I18N/factory.py +++ b/I18N/factory.py @@ -1,26 +1,26 @@ -from fluent_compiler.bundle import FluentBundle -from fluentogram import FluentTranslator, TranslatorHub - -from config import AVAILABLE_LANGUAGES, DEFAULT_LANGUAGE, LANGUAGE_FALLBACKS - -DIR_PATH = 'I18N' - - -def i18n_factory() -> TranslatorHub: - translators = [] - for lang in AVAILABLE_LANGUAGES: - translators.append( - FluentTranslator( - locale=lang, - translator=FluentBundle.from_files( - locale=lang, - filenames=[f'{DIR_PATH}/{lang}/txt.ftl'], - use_isolating=False) - ) - ) - - return TranslatorHub( - LANGUAGE_FALLBACKS, - translators, - root_locale=DEFAULT_LANGUAGE, +from fluent_compiler.bundle import FluentBundle +from fluentogram import FluentTranslator, TranslatorHub + +from config import AVAILABLE_LANGUAGES, DEFAULT_LANGUAGE, LANGUAGE_FALLBACKS + 
+DIR_PATH = 'I18N' + + +def i18n_factory() -> TranslatorHub: + translators = [] + for lang in AVAILABLE_LANGUAGES: + translators.append( + FluentTranslator( + locale=lang, + translator=FluentBundle.from_files( + locale=lang, + filenames=[f'{DIR_PATH}/{lang}/txt.ftl'], + use_isolating=False) + ) + ) + + return TranslatorHub( + LANGUAGE_FALLBACKS, + translators, + root_locale=DEFAULT_LANGUAGE, ) \ No newline at end of file diff --git a/I18N/ru/txt.ftl b/I18N/ru/txt.ftl index 1ebc54e..f21a371 100644 --- a/I18N/ru/txt.ftl +++ b/I18N/ru/txt.ftl @@ -1,154 +1,154 @@ -start_text = - Я Эви — виртуальный техномаг и твой ИИ-агент... 🦄👻 - - Мои возможности включают (но не ограничиваются): - - решение сложных, многоэтапных задач - - проведение глубоких исследований - - интеллектуальный веб-поиск - - анализ документов и изображений - - создание изображений - - аналитика DEX и своп токенов Solana - - планировщик задач - - управление памятью - - Просто пиши свои запросы в чат на естественном языке или отправляй голосовые сообщения для начала взаимодействия! 🔮✨ - - ⚠️ Совет! Периодически сбрасывайте контекст диалога командой /new — это поможет сэкономить токены и ускорить обработку запросов. - -close_kb = Закрыть - -command_new_text = Подтвердите начало нового диалога без сохранения текущего. Рекомендуется завершить решение текущей задачи перед началом нового диалога! После удаления текущая история будет стёрта, и контекст обнулится. - -command_approve_new_text = Текущий диалог удален! - -command_new_approve_kb = Подтверждаю - -command_new_save_kb = Сохранить диалог - -command_save_text = Подтвердите сохранение текущего диалога в память. Рекомендуется завершить решение текущей задачи перед сохранением! После сохранения начнётся новый диалог с обнулённым контекстом, но ключевые моменты из текущей беседы останутся в памяти системы. - -command_save_approve_kb = Текущий диалог сохранен в память системы! 
- -command_delete_text = Подтвердите удаление текущего диалога и всей памяти системы о вас. - -command_delete_approve_text = Память системы о вас и диалог удалены! Начните новый диалог. - -token_price_error_text = Не тот формат, пример: 0.01 - -not_token_price_error_text = Вы еще не установили цену токена! - -token_price_updated_text = Цена токена обновлена! - -command_wallet_text = - Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. - - Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! - -cmd_help_text = - Взаимодействуйте с системой через окно чата. Все функции доступны через обычные сообщения. Для настройки дополнительных параметров используйте Menu. - - Для глубокой настройки системы и персонажа редактируйте файл: \bot\agents_tools\agents_ - -command_settings_text = Настройки: - -settings_language_text = Язык интерфейса - -text_choose_lang = Выберите язык интерфейса: - -back_kb = Назад - -text_document_upload = Файл успешно загружен! Вы можете задать вопрос. - -command_knowledge_text = Это общая база знаний системы. Добавленная информация будет доступна всем пользователям (при использовании режимов: free и pay)! Хотите добавить файлы или очистить базу знаний? - -command_knowledge_add_kb = Добавить информацию - -command_knowledge_delete_kb = Очистить базу знаний - -command_knowledge_add_text = Отправьте в чат текстовый файл с информацией! Добавляйте в чат только по одному файлу! - -text_not_format_file = Не формат, повторите попытку! Поддерживаемые форматы документов: .pdf, .txt, .md, .doc(x), .pptx и .py - -text_approve_file = Файл успешно загружен! Вы можете задать вопрос. - -command_knowledge_delete_text = Подтвердите удаление базы знаний. - -text_approve_delete = База знаний удалена. Добавьте новую информацию в базу знаний. - -warning_save_context_txt = Ошибка при сохранении контекста в txt файл! 
- -warning_text_no_credits = Недостаточно кредитов! - -wait_answer_text = Минуточку ✨ - -answer_md = Скачать ответ - -warning_text_tokens = Размер диалога превышает 15 000 токенов! Для экономии вы можете сохранить диалог в память системы или удалить его через меню после решения текущей задачи. - -warning_text_format = Не правильный формат! - -warning_text_error = Произошла ошибка! - -cmd_wallet_text_start = - Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. - - Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! - -wallet_balance_kb = Баланс кошелька - -wallet_delete_key = Удалить закрытый ключ - -not_format_wallet_key = Не формат! Напишите закрытый ключ кошелька Solana в формате [45, 456, …]. - -text_after_add_key = - Агент получил доступ к кошельку. - - Баланс кошелька: - - -wallet_delete_key_text = Подтвердите удаление закрытого ключа. - -command_delete_key_approve_text = Закрытый ключ удален. Привяжите новый кошелек чтобы использовать торгового агента. - -text_balance_wallet = Баланс кошелька: - -cmd_wallet_text = Ваш баланс: - -add_balance_kb = Пополнить баланс - -text_add_balance = Введите сумму платежа в $, целое число не менее 1$. - -text_add_balance_error = Повторите попытку! Введите сумму платежа в $, целое число не менее 1$. - -choose_type_pay_text = Выберите метод пополнения: - -ton_type_kb = TON - -sol_type_kb = Token (Solana) - -error_create_payment = Произошла ошибка при создании платежа. Попробуйте позднее. - -check_payment_kb = Проверить платеж - -text_payment_create = - Совершите платеж на сумму: { $sum } - Кошелек: { $wallet } - -text_payment_create_sol = - Совершите платеж на сумму: { $sum } токенов - Кошелек: { $wallet } - Адрес токена: { $token } - -error_get_token_price = Цена токена не указана. Укажите цену токена /token_price. 
- -wait_check_payment_text = Проверка платежа ⏳ - -check_payment_success_text = Платеж успешно совершен! - -check_payment_error_text = Платеж не был совершен! Попробуйте позднее. - -warning_text_no_row_md = Контекст был удален. Строка не найдена в базе данных. - -text_user_upload_file = Пользователь загрузил файл { $filename } в инструмент search_conversation_memory - +start_text = + Я Эви — виртуальный техномаг и твой ИИ-агент... 🦄👻 + + Мои возможности включают (но не ограничиваются): + - решение сложных, многоэтапных задач + - проведение глубоких исследований + - интеллектуальный веб-поиск + - анализ документов и изображений + - создание изображений + - аналитика DEX и своп токенов Solana + - планировщик задач + - управление памятью + + Просто пиши свои запросы в чат на естественном языке или отправляй голосовые сообщения для начала взаимодействия! 🔮✨ + + ⚠️ Совет! Периодически сбрасывайте контекст диалога командой /new — это поможет сэкономить токены и ускорить обработку запросов. + +close_kb = Закрыть + +command_new_text = Подтвердите начало нового диалога без сохранения текущего. Рекомендуется завершить решение текущей задачи перед началом нового диалога! После удаления текущая история будет стёрта, и контекст обнулится. + +command_approve_new_text = Текущий диалог удален! + +command_new_approve_kb = Подтверждаю + +command_new_save_kb = Сохранить диалог + +command_save_text = Подтвердите сохранение текущего диалога в память. Рекомендуется завершить решение текущей задачи перед сохранением! После сохранения начнётся новый диалог с обнулённым контекстом, но ключевые моменты из текущей беседы останутся в памяти системы. + +command_save_approve_kb = Текущий диалог сохранен в память системы! + +command_delete_text = Подтвердите удаление текущего диалога и всей памяти системы о вас. + +command_delete_approve_text = Память системы о вас и диалог удалены! Начните новый диалог. 
+ +token_price_error_text = Не тот формат, пример: 0.01 + +not_token_price_error_text = Вы еще не установили цену токена! + +token_price_updated_text = Цена токена обновлена! + +command_wallet_text = + Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. + + Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! + +cmd_help_text = + Взаимодействуйте с системой через окно чата. Все функции доступны через обычные сообщения. Для настройки дополнительных параметров используйте Menu. + + Для глубокой настройки системы и персонажа редактируйте файл: \bot\agents_tools\agents_ + +command_settings_text = Настройки: + +settings_language_text = Язык интерфейса + +text_choose_lang = Выберите язык интерфейса: + +back_kb = Назад + +text_document_upload = Файл успешно загружен! Вы можете задать вопрос. + +command_knowledge_text = Это общая база знаний системы. Добавленная информация будет доступна всем пользователям (при использовании режимов: free и pay)! Хотите добавить файлы или очистить базу знаний? + +command_knowledge_add_kb = Добавить информацию + +command_knowledge_delete_kb = Очистить базу знаний + +command_knowledge_add_text = Отправьте в чат текстовый файл с информацией! Добавляйте в чат только по одному файлу! + +text_not_format_file = Не формат, повторите попытку! Поддерживаемые форматы документов: .pdf, .txt, .md, .doc(x), .pptx и .py + +text_approve_file = Файл успешно загружен! Вы можете задать вопрос. + +command_knowledge_delete_text = Подтвердите удаление базы знаний. + +text_approve_delete = База знаний удалена. Добавьте новую информацию в базу знаний. + +warning_save_context_txt = Ошибка при сохранении контекста в txt файл! + +warning_text_no_credits = Недостаточно кредитов! + +wait_answer_text = Минуточку ✨ + +answer_md = Скачать ответ + +warning_text_tokens = Размер диалога превышает 15 000 токенов! 
Для экономии вы можете сохранить диалог в память системы или удалить его через меню после решения текущей задачи. + +warning_text_format = Не правильный формат! + +warning_text_error = Произошла ошибка! + +cmd_wallet_text_start = + Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. + + Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! + +wallet_balance_kb = Баланс кошелька + +wallet_delete_key = Удалить закрытый ключ + +not_format_wallet_key = Не формат! Напишите закрытый ключ кошелька Solana в формате [45, 456, …]. + +text_after_add_key = + Агент получил доступ к кошельку. + + Баланс кошелька: + + +wallet_delete_key_text = Подтвердите удаление закрытого ключа. + +command_delete_key_approve_text = Закрытый ключ удален. Привяжите новый кошелек чтобы использовать торгового агента. + +text_balance_wallet = Баланс кошелька: + +cmd_wallet_text = Ваш баланс: + +add_balance_kb = Пополнить баланс + +text_add_balance = Введите сумму платежа в $, целое число не менее 1$. + +text_add_balance_error = Повторите попытку! Введите сумму платежа в $, целое число не менее 1$. + +choose_type_pay_text = Выберите метод пополнения: + +ton_type_kb = TON + +sol_type_kb = Token (Solana) + +error_create_payment = Произошла ошибка при создании платежа. Попробуйте позднее. + +check_payment_kb = Проверить платеж + +text_payment_create = + Совершите платеж на сумму: { $sum } + Кошелек: { $wallet } + +text_payment_create_sol = + Совершите платеж на сумму: { $sum } токенов + Кошелек: { $wallet } + Адрес токена: { $token } + +error_get_token_price = Цена токена не указана. Укажите цену токена /token_price. + +wait_check_payment_text = Проверка платежа ⏳ + +check_payment_success_text = Платеж успешно совершен! + +check_payment_error_text = Платеж не был совершен! Попробуйте позднее. + +warning_text_no_row_md = Контекст был удален. 
Строка не найдена в базе данных. + +text_user_upload_file = Пользователь загрузил файл { $filename } в инструмент search_conversation_memory + wait_answer_text_scheduler = Выполняю запрос планировщика ✨ \ No newline at end of file diff --git a/I18N/zh/txt.ftl b/I18N/zh/txt.ftl new file mode 100644 index 0000000..7f9af16 --- /dev/null +++ b/I18N/zh/txt.ftl @@ -0,0 +1,192 @@ +start_text = + 我是 Evi — 一位虚拟技术法师,也是你的 AI 智能体... 🦄👻 + + 我的能力包括(但不限于): + - 解决复杂的多步骤任务 + - 进行深度研究 + - 智能网页搜索 + - 文档和图片分析 + - 图片生成 + - DEX 分析和 Solana 代币兑换 + - 任务调度器 + - 记忆管理 + + 只需在聊天中用自然语言写下你的请求,或发送语音消息即可开始互动!🔮✨ + + ⚠️ 提示!请定期使用 /new 命令重置对话上下文 — 这有助于节省 token 并加快请求处理速度。 + +close_kb = 关闭 + +command_new_text = 确认开始新对话且不保存当前对话。建议在开始新对话前完成当前任务!删除后,当前历史记录将被清除,上下文将被重置。 + +command_approve_new_text = 当前对话已删除! + +command_new_approve_kb = 确认 + +command_new_save_kb = 保存对话 + +command_save_text = 确认将当前对话保存到记忆中。建议在保存前完成当前任务!保存后将开始新对话并重置上下文,但当前对话的关键内容将保留在系统记忆中。 + +command_save_approve_kb = 当前对话已保存到系统记忆! + +command_delete_text = 确认删除当前对话及系统关于你的所有记忆。 + +command_delete_approve_text = 系统关于你的记忆和对话已删除!请开始新对话。 + +token_price_error_text = 格式错误,示例:0.01 + +not_token_price_error_text = 你还未设置代币价格! + +token_price_updated_text = 代币价格已更新! + +command_wallet_text = + 如果你已绑定钱包,输入新的私钥将替换现有钱包。请以 [45, 456, …] 格式输入 Solana 钱包私钥。 + + 注意:请使用余额较少的独立钱包,因为交易智能体目前处于测试模式! 
+ +cmd_help_text = + 📋 命令列表 + + /start — 查看机器人介绍和功能概览 + /help — 显示此帮助信息 + /new — 开始新对话(清空当前对话,不保存) + /save — 保存当前对话到系统记忆并开始新对话 + /delete — 删除所有对话记录和系统记忆 + /balance — 查看余额和充值 + /settings — 设置界面语言 + + 💬 交互方式 + + 文字消息 — 直接在聊天中输入问题或指令 + 语音消息 — 发送语音,系统会自动转文字处理 + 图片 — 发送图片进行分析(如图表、截图等) + 文件 — 发送文档供分析,支持格式:.pdf .txt .md .doc(x) .pptx .py + + 🔧 核心功能 + + • 多步骤复杂任务处理 + • 深度研究与分析 + • 智能网页搜索 + • 文档和图片分析 + • 图片生成 + • DEX 分析和 Solana 代币兑换 + • 任务调度器(定时/周期任务) + • 对话记忆管理 + + 💡 使用示例 + + 帮我分析这个项目的架构 + 搜索最新的 Solana 生态新闻 + 生成一张赛博朋克风格的城市图片 + 每天早上9点提醒我查看市场行情 + + ⚙️ /save 与 /new 的区别 + + /save — 将当前对话的关键内容保存到长期记忆,然后开始新对话。适合完成一个任务后使用,智能体会记住重要信息。 + /new — 直接清空当前对话,不保存任何内容。适合对话内容不重要时快速重置。 + + ⚠️ 建议定期使用 /new 或 /save 重置上下文,以节省 token 并加快响应速度。 + +command_settings_text = 设置: + +settings_language_text = 界面语言 + +text_choose_lang = 请选择界面语言: + +back_kb = 返回 + +text_document_upload = 文件上传成功!你可以开始提问。 + +command_knowledge_text = 这是系统的通用知识库。添加的信息将对所有用户可用(使用 free 和 pay 模式时)!是否要添加文件或清空知识库? + +command_knowledge_add_kb = 添加信息 + +command_knowledge_delete_kb = 清空知识库 + +command_knowledge_add_text = 请在聊天中发送包含信息的文本文件!每次只添加一个文件! + +text_not_format_file = 格式不正确,请重试!支持的文档格式:.pdf、.txt、.md、.doc(x)、.pptx 和 .py + +text_approve_file = 文件上传成功!你可以开始提问。 + +command_knowledge_delete_text = 确认删除知识库。 + +text_approve_delete = 知识库已删除。请向知识库添加新信息。 + +warning_save_context_txt = 保存上下文到 txt 文件时出错! + +warning_text_no_credits = 积分不足! + +wait_answer_text = 请稍候 ✨ + +answer_md = 下载回答 + +warning_text_tokens = 对话大小超过 15,000 个 token!为节省资源,你可以在完成当前任务后,通过菜单将对话保存到系统记忆或删除。 + +warning_text_format = 格式错误! + +warning_text_error = 发生错误! + +cmd_wallet_text_start = + 如果你已绑定钱包,输入新的私钥将替换现有钱包。请以 [45, 456, …] 格式输入 Solana 钱包私钥。 + + 注意:请使用余额较少的独立钱包,因为交易智能体目前处于测试模式! 
+ +wallet_balance_kb = 钱包余额 + +wallet_delete_key = 删除私钥 + +not_format_wallet_key = 格式错误!请以 [45, 456, …] 格式输入 Solana 钱包私钥。 + +text_after_add_key = + 智能体已获得钱包访问权限。 + + 钱包余额: + + +wallet_delete_key_text = 确认删除私钥。 + +command_delete_key_approve_text = 私钥已删除。请绑定新钱包以使用交易智能体。 + +text_balance_wallet = 钱包余额: + +cmd_wallet_text = 你的余额: + +add_balance_kb = 充值余额 + +text_add_balance = 请输入支付金额(美元),整数且不少于 $1。 + +text_add_balance_error = 请重试!请输入支付金额(美元),整数且不少于 $1。 + +choose_type_pay_text = 请选择充值方式: + +ton_type_kb = TON + +sol_type_kb = Token (Solana) + +error_create_payment = 创建支付时发生错误,请稍后重试。 + +check_payment_kb = 检查支付 + +text_payment_create = + 请支付:{ $sum } + 钱包:{ $wallet } + +text_payment_create_sol = + 请支付:{ $sum } 个代币 + 钱包:{ $wallet } + 代币地址:{ $token } + +error_get_token_price = 未指定代币价格。请使用 /token_price 设置代币价格。 + +wait_check_payment_text = 正在检查支付 ⏳ + +check_payment_success_text = 支付成功! + +check_payment_error_text = 支付未完成!请稍后重试。 + +warning_text_no_row_md = 上下文已被删除。数据库中未找到记录。 + +text_user_upload_file = 用户上传了文件 { $filename } 到工具 search_conversation_memory + +wait_answer_text_scheduler = 正在执行调度器请求 ✨ diff --git a/LICENSE b/LICENSE index 23888b3..9c7abf1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,21 @@ -MIT License - -Copyright (c) 2025 evi.run - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +MIT License + +Copyright (c) 2025 evi.run + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 85a376e..62c6230 100644 --- a/README.md +++ b/README.md @@ -1,528 +1,528 @@ -# 🦄 evi-run — Customizable Multi-Agent AI System - -
- -[![Python](https://img.shields.io/badge/Python-3.11-blue?style=flat-square&logo=python&logoColor=white)](https://python.org) -[![OpenAI](https://img.shields.io/badge/OpenAI-Agents_SDK-green?style=flat-square&logo=openai&logoColor=white)](https://openai.github.io/openai-agents-python/) -[![Telegram](https://img.shields.io/badge/Telegram-Bot_API-blue?style=flat-square&logo=telegram&logoColor=white)](https://core.telegram.org/bots/api) -[![Docker](https://img.shields.io/badge/Docker-Compose-blue?style=flat-square&logo=docker&logoColor=white)](https://docker.com) - -[![Python CI](https://github.com/pipedude/evi-run/workflows/Python%20CI/badge.svg)](https://github.com/pipedude/evi-run/actions) -[![Docker Build](https://github.com/pipedude/evi-run/workflows/Docker%20Build%20&%20Publish/badge.svg)](https://github.com/pipedude/evi-run/actions) - -**Ready-to-use customizable multi-agent AI system that combines plug-and-play simplicity with framework-level flexibility** - -[🚀 Quick Start](#-quick-installation) • [🤖 Try Demo](https://t.me/my_evi_bot) • [🔧 Configuration](#-configuration) • [🎯 Features](#-features) • [💡 Use Cases](#-use-cases) - -**Connect with fellow developers and AI enthusiasts!** - -[![Join our Telegram Community](https://img.shields.io/badge/Join_Community-Telegram-blue?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/evi_run) - -
- ---- - -## 🌟 What is evi-run? - -**evi-run** is a powerful, production-ready multi-agent AI system that bridges the gap between out-of-the-box solutions and custom AI frameworks. Built on Python using the OpenAI Agents SDK, the system has an intuitive interface via a Telegram bot and provides enterprise-level artificial intelligence capabilities. - -### ✨ Key Advantages - -- **🚀 Instant Deployment** - Get your AI system running in minutes, not hours -- **🔧 Ultimate Flexibility** - Framework-level customization capabilities -- **📊 Built-in Analytics** - Comprehensive usage tracking and insights -- **💬 Telegram Integration** - Seamless user experience through familiar messaging interface -- **🏗️ Scalable Architecture** - Grows with your needs from prototype to production - ---- - -## 🎯 Features - -### 🔮 Advanced System Features -- **Memory Management** - Context control and long-term memory -- **Knowledge Integration** - Dynamic knowledge base expansion -- **Task Scheduling** - Scheduling and deferred task execution / once / daily / interval / -- **Multi-Agent Orchestration** - Complex task decomposition and execution -- **Custom Agent Creation** - Build specialized AI agents for specific tasks - -### 🦄 AI Features -- **Deep Research** - Multi-step investigation and analysis -- **Web Intelligence** - Smart internet search and data extraction -- **Document Processing** - Handle PDFs, images, and various file formats -- **Image Generation** - AI-powered visual content creation -- **DEX Analytics** - Real-time decentralized exchange monitoring -- **Solana Token Swap** - Easy, fast and secure token swap - -**⚠️ Important for Token Swap:** -The token swap function is only active in private mode. Your private key will be stored in your database in base64 format. 
- -### 💰 Flexible Usage Modes -- **Private Mode** - Personal use for bot owner only -- **Free Mode** - Public access with configurable usage limits -- **Pay Mode** - Monetized system with balance management and payments - -### ⏳ Under Development -- **NSFW Mode** - Unrestricted topic exploration and content generation -- **Task Scheduler** - Automated agent task planning and execution / ✅ completed / -- **Automatic Limit Orders** - Smart trading with automated take-profit and stop-loss functionality - ---- - -## 🛠️ Technology Stack - -| Component | Technology | -|-----------|------------| -| **Core Language** | Python 3.11 | -| **AI Framework** | OpenAI Agents SDK | -| **Communication** | Model Context Protocol | -| **Blockchain** | Solana RPC API | -| **Interface** | Telegram Bot API | -| **Database** | PostgreSQL | -| **Cache** | Redis | -| **Deployment** | Docker & Docker Compose | - ---- - -## 🚀 Quick Installation - -Get evi-run running in under 5 minutes with our streamlined Docker setup: - -### Prerequisites - -**System Requirements:** -- Ubuntu 22.04 server (ensure location is not blocked by OpenAI) -- Root or sudo access -- Internet connection - -**Required API Keys & Tokens:** -- **Telegram Bot Token** - Create bot via [@BotFather](https://t.me/BotFather) -- **OpenAI API Key** - Get from [OpenAI Platform](https://platform.openai.com/api-keys) -- **Your Telegram ID** - Get from [@userinfobot](https://t.me/userinfobot) - -**⚠️ Important for Image Generation:** -To use protected OpenAI models (especially for image generation), you need to complete organization verification at [OpenAI Organization Settings](https://platform.openai.com/settings/organization/general). This is a simple verification process required by OpenAI. - -### Installation Steps - -1. 
**Download and prepare the project:** - ```bash - # Navigate to installation directory - cd /opt - - # Clone the project from GitHub - git clone https://github.com/pipedude/evi-run.git - - # Set proper permissions - sudo chown -R $USER:$USER evi-run - cd evi-run - ``` - -2. **Configure environment variables:** - ```bash - # Copy example configuration - cp .env.example .env - - # Edit configuration files - nano .env # Add your API keys and tokens - nano config.py # Set your Telegram ID and preferences - ``` - -3. **Run automated Docker setup:** - ```bash - # Make setup script executable - chmod +x docker_setup_en.sh - - # Run Docker installation - ./docker_setup_en.sh - ``` - -4. **Launch the system:** - ```bash - # Build and start containers - docker compose up --build -d - ``` - -5. **Verify installation:** - ```bash - # Check running containers - docker compose ps - - # View logs - docker compose logs -f - ``` - -**🎉 That's it! Your evi-run system is now live. Open your Telegram bot and start chatting!** - ---- - -## 🔧 Configuration - -### Essential Configuration Files - -#### `.env` - Environment Variables -```bash -# REQUIRED: Telegram Bot Token from @BotFather -TELEGRAM_BOT_TOKEN=your_bot_token_here - -# REQUIRED: OpenAI API Key -API_KEY_OPENAI=your_openai_api_key -``` - -#### `config.py` - System Settings -```python -# REQUIRED: Your Telegram User ID -ADMIN_ID = 123456789 - -# Usage Mode: 'private', 'free', or 'pay' -TYPE_USAGE = 'private' -``` - -### Usage Modes Explained - -| Mode | Description | Best For | -|------|-------------|----------| -| **Private** | Bot owner only | Personal use, development, testing | -| **Free** | Public access with limits | Community projects, demos | -| **Pay** | Monetized with balance system | Commercial applications, SaaS | - -**⚠️ Important for Pay mode:** -Pay mode enables monetization features. To activate this mode, the owner must burn a certain amount of $EVI tokens. 
The platform supports custom tokens created on the Solana blockchain for monetization purposes. - ---- - -## 💡 Use Cases - -### 🎭 Virtual Characters -Create engaging AI personalities for entertainment, education, or brand representation. - -### 🛠️ Customer Support -Deploy intelligent support bots that understand context and provide helpful solutions. - -### 👤 Personal AI Assistant -Build your own AI companion for productivity, research, and daily tasks. - -### 📊 Data Analyst -Automate data processing, generate insights, and create reports from complex datasets. - -### 💹 Trading Agent -Launch trading agents for DEX with real-time analytics. - -### 🔧 Custom Solutions -Leverage the framework to build specialized AI agents for any domain or industry. - ---- - -## 🏗️ Advanced Customization - -### 🔬 Model Selection & Configuration - -By default, the system is configured for optimal performance and low cost of use. For professional and specialized use cases, proper model selection is crucial for optimal performance and cost efficiency. Choose models for your use cases and tasks. - -#### Available Models - -For the complete list of available models, capabilities, and pricing, see the **[OpenAI Models Documentation](https://platform.openai.com/docs/models)**. - -### 🧑‍💻 Adding Custom Agents - -evi-run uses the **Agents** library with a multi-agent architecture where specialized agents are integrated as tools into the main agent. All agent configuration is centralized in: - -```bash -bot/agents_tools/agents_.py -``` - -#### 🔧 Adding a Custom Agent - -**1. Create the Agent** -```python -# Add after existing agents -custom_agent = Agent( - name="Custom Agent", - instructions="Your specialized agent instructions here...", - model="gpt-5-mini", - model_settings=ModelSettings( - reasoning=Reasoning(effort="low"), - extra_body={"text": {"verbosity": "medium"}} - ), - tools=[WebSearchTool(search_context_size="medium")] # Optional tools -) -``` - -**2. 
Register as Tool in Main Agent** -```python -# In create_main_agent function, add to main_agent.tools list: -main_agent = Agent( - # ... existing configuration - tools=[ - # ... existing tools - custom_agent.as_tool( - tool_name="custom_function", - tool_description="Description of what this agent does" - ), - ] -) -``` - -#### ⚙️ Customizing Agent Behavior - -**Main Agent (Evi) Personality:** -Edit the detailed instructions in the `main_agent` instructions block: - - Character profile and personality - - Expertise areas - - Communication style - - Behavioral patterns - -**Agent Parameters:** -- `name`: Agent identifier -- `instructions`: System prompt and behavior -- `model`: OpenAI model (`gpt-5`, `gpt-5-mini`, etc.) -- `model_settings`: Model settings (Reasoning, extra_body, etc.) -- `tools`: Available tools (WebSearchTool, FileSearchTool, etc.) -- `mcp_servers`: MCP server connections - -### 🤖 Using Alternative Models - -evi-run supports non-OpenAI models through the Agents library. There are several ways to integrate other LLM providers: - -**Method 1: LiteLLM Integration (Recommended)** - -Install the LiteLLM dependency: -```bash -pip install "openai-agents[litellm]" -``` - -Use models with the `litellm/` prefix: -```python -# Claude via LiteLLM -claude_agent = Agent( - name="Claude Agent", - instructions="Your instructions here...", - model="litellm/anthropic/claude-3-5-sonnet-20240620", - # ... other parameters -) - -# Gemini via LiteLLM -gemini_agent = Agent( - name="Gemini Agent", - instructions="Your instructions here...", - model="litellm/gemini/gemini-2.5-flash-preview-04-17", - # ... other parameters -) -``` - -**Method 2: LitellmModel Class** -```python -from agents.extensions.models.litellm_model import LitellmModel - -custom_agent = Agent( - name="Custom Agent", - instructions="Your instructions here...", - model=LitellmModel(model="anthropic/claude-3-5-sonnet-20240620", api_key="your-api-key"), - # ... 
other parameters -) -``` - -**Method 3: Global OpenAI Client** -```python -from agents.models._openai_shared import set_default_openai_client -from openai import AsyncOpenAI - -# For providers with OpenAI-compatible API -set_default_openai_client(AsyncOpenAI( - base_url="https://api.provider.com/v1", - api_key="your-api-key" -)) -``` - -**Documentation & Resources:** -- **[Model Configuration Guide](https://openai.github.io/openai-agents-python/models/)** - Complete setup documentation -- **[LiteLLM Integration](https://openai.github.io/openai-agents-python/models/litellm/)** - Detailed LiteLLM usage -- **[Supported Models](https://docs.litellm.ai/docs/providers)** - Full list of LiteLLM providers - -**Important Notes:** -- Most LLM providers don't support the Responses API yet -- If not using OpenAI, consider disabling tracing: `set_tracing_disabled()` -- You can mix different providers for different agents - -### 🎯 Best Practices - -- **Focused Instructions**: Each agent should have a clear, specific purpose -- **Model Selection**: Use appropriate models for complexity (gpt-5 vs gpt-5-mini) -- **Tool Integration**: Leverage WebSearchTool, FileSearchTool, and MCP servers -- **Naming Convention**: Use descriptive tool names for main agent clarity -- **Testing**: Test agent responses in isolation before integration - -### 🌐 Bot Messages Localization - -**Customizing Bot Interface Messages:** - -All bot messages and interface text are stored in the `I18N` directory and can be fully customized to match your needs: - -``` -I18N/ -├── factory.py # Translation loader -├── en/ -│ └── txt.ftl # English messages -└── ru/ - └── txt.ftl # Russian messages -``` - -**Message Files Format:** -The bot uses [Fluent](https://projectfluent.org/) localization format (`.ftl` files) for multi-language support: - -**To customize messages:** -1. Edit the appropriate `.ftl` file in `I18N/en/` or `I18N/ru/` -2. Restart the bot container for changes to take effect -3. 
Add new languages by creating new subdirectories with `txt.ftl` files - ---- - -## 📊 Monitoring & Analytics - -evi-run includes comprehensive tracing and analytics capabilities through the OpenAI Agents SDK. The system automatically tracks all agent operations and provides detailed insights into performance and usage. - -### 🔍 Built-in Tracing - -**Automatic Tracking:** -- **Agent Runs** - Each agent execution with timing and results -- **LLM Generations** - Model calls with inputs/outputs and token usage -- **Function Calls** - Tool usage and execution details -- **Handoffs** - Agent-to-agent interactions -- **Audio Processing** - Speech-to-text and text-to-speech operations -- **Guardrails** - Safety checks and validations - -**⚠️ Important for enabled Tracing:** -The OpenAI Agents SDK (Tracing) analytics system records all user requests for performance monitoring. Although the data is anonymized, this creates privacy issues. - -For ethical reasons, owners of public bots should either explicitly inform users about this, or disable Tracing. 
- -```python -# Disable Tracking in `bot/agents_tools/agents_.py` -set_tracing_disabled(True) -``` - -### 📈 External Analytics Platforms - -evi-run supports integration with 20+ monitoring and analytics platforms: - -**Popular Integrations:** -- **[Weights & Biases](https://weave-docs.wandb.ai/guides/integrations/openai_agents)** - ML experiment tracking -- **[LangSmith](https://docs.smith.langchain.com/observability/how_to_guides/trace_with_openai_agents_sdk)** - LLM application monitoring -- **[Arize Phoenix](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai-agents-sdk)** - AI observability -- **[Langfuse](https://langfuse.com/docs/integrations/openaiagentssdk/openai-agents)** - LLM analytics -- **[AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)** - Agent performance tracking -- **[Pydantic Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/)** - Structured logging - -**Enterprise Solutions:** -- **[Braintrust](https://braintrust.dev/docs/guides/traces/integrations)** - AI evaluation platform -- **[MLflow](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)** - ML lifecycle management -- **[Portkey AI](https://portkey.ai/docs/integrations/agents/openai-agents)** - AI gateway and monitoring - -### 📋 System Logs - -**Docker Container Logs:** -```bash -# View all logs -docker compose logs - -# Follow specific service -docker compose logs -f bot - -# Database logs -docker compose logs postgres_agent_db - -# Filter by time -docker compose logs --since 1h bot -``` - -### 🔗 Documentation - -- **[Complete Tracing Guide](https://openai.github.io/openai-agents-python/tracing/)** - Full tracing documentation -- **[Analytics Integration List](https://openai.github.io/openai-agents-python/tracing/#external-tracing-processors-list)** - All supported platforms - ---- - -## 🔍 Troubleshooting - -### Common Issues - -**Bot not responding:** -```bash -# Check bot container status -docker compose ps -docker compose logs 
bot -``` - -**Database connection errors:** -```bash -# Restart database -docker compose restart postgres_agent_db -docker compose logs postgres_agent_db -``` - -**Memory issues:** -```bash -# Check system resources -docker stats -``` - -### Support Resources -- **Community**: [Telegram Support Group](https://t.me/evi_run) -- **Issues**: [GitHub Issues](https://github.com/pipedude/evi-run/issues) -- **Telegram**: [@playa3000](https://t.me/playa3000) - ---- - -## 🚦 System Requirements - -### Minimum Requirements -- **CPU**: 2 cores -- **RAM**: 2GB -- **Storage**: 10GB -- **Network**: Stable internet connection - -### Recommended for Production -- **CPU**: 2+ cores -- **RAM**: 4GB+ -- **Storage**: 20GB+ SSD -- **Network**: High-speed connection - ---- - -## 🔐 Security Considerations - -- **API Keys**: Store securely in environment variables -- **Database**: Use strong passwords and restrict access -- **Network**: Configure firewalls and use HTTPS -- **Updates**: Keep dependencies and Docker images updated - ---- - -## 📋 License - -This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. - ---- - -## 🤝 Contributing - -We welcome contributions! Please see our [Contributing Guidelines](CONTRIBUTING.md) for details. - ---- - -## 👽 Community and Support - -- **Website**: [evi.run](https://evi.run) -- **Contact**: [Alex Flash](https://t.me/playa3000) -- **Community**: [Telegram Group](https://t.me/evi_run) -- **X (Twitter)**: [alexflash99](https://x.com/alexflash99) -- **Reddit**: [Alex Flash](https://www.reddit.com/user/Worth_Professor_425/) - ---- - -
- -**Made with ❤️ by the evi-run team** - -⭐ **Star this repository if evi-run helped you build amazing AI experiences!** ⭐ - -
+# 🦄 evi-run — Customizable Multi-Agent AI System + +
+ +[![Python](https://img.shields.io/badge/Python-3.11-blue?style=flat-square&logo=python&logoColor=white)](https://python.org) +[![OpenAI](https://img.shields.io/badge/OpenAI-Agents_SDK-green?style=flat-square&logo=openai&logoColor=white)](https://openai.github.io/openai-agents-python/) +[![Telegram](https://img.shields.io/badge/Telegram-Bot_API-blue?style=flat-square&logo=telegram&logoColor=white)](https://core.telegram.org/bots/api) +[![Docker](https://img.shields.io/badge/Docker-Compose-blue?style=flat-square&logo=docker&logoColor=white)](https://docker.com) + +[![Python CI](https://github.com/pipedude/evi-run/workflows/Python%20CI/badge.svg)](https://github.com/pipedude/evi-run/actions) +[![Docker Build](https://github.com/pipedude/evi-run/workflows/Docker%20Build%20&%20Publish/badge.svg)](https://github.com/pipedude/evi-run/actions) + +**Ready-to-use customizable multi-agent AI system that combines plug-and-play simplicity with framework-level flexibility** + +[🚀 Quick Start](#-quick-installation) • [🤖 Try Demo](https://t.me/my_evi_bot) • [🔧 Configuration](#-configuration) • [🎯 Features](#-features) • [💡 Use Cases](#-use-cases) + +**Connect with fellow developers and AI enthusiasts!** + +[![Join our Telegram Community](https://img.shields.io/badge/Join_Community-Telegram-blue?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/evi_run) + +
+ +--- + +## 🌟 What is evi-run? + +**evi-run** is a powerful, production-ready multi-agent AI system that bridges the gap between out-of-the-box solutions and custom AI frameworks. Built on Python using the OpenAI Agents SDK, the system has an intuitive interface via a Telegram bot and provides enterprise-level artificial intelligence capabilities. + +### ✨ Key Advantages + +- **🚀 Instant Deployment** - Get your AI system running in minutes, not hours +- **🔧 Ultimate Flexibility** - Framework-level customization capabilities +- **📊 Built-in Analytics** - Comprehensive usage tracking and insights +- **💬 Telegram Integration** - Seamless user experience through familiar messaging interface +- **🏗️ Scalable Architecture** - Grows with your needs from prototype to production + +--- + +## 🎯 Features + +### 🔮 Advanced System Features +- **Memory Management** - Context control and long-term memory +- **Knowledge Integration** - Dynamic knowledge base expansion +- **Task Scheduling** - Scheduling and deferred task execution / once / daily / interval / +- **Multi-Agent Orchestration** - Complex task decomposition and execution +- **Custom Agent Creation** - Build specialized AI agents for specific tasks + +### 🦄 AI Features +- **Deep Research** - Multi-step investigation and analysis +- **Web Intelligence** - Smart internet search and data extraction +- **Document Processing** - Handle PDFs, images, and various file formats +- **Image Generation** - AI-powered visual content creation +- **DEX Analytics** - Real-time decentralized exchange monitoring +- **Solana Token Swap** - Easy, fast and secure token swap + +**⚠️ Important for Token Swap:** +The token swap function is only active in private mode. Your private key will be stored in your database in base64 format. 
+ +### 💰 Flexible Usage Modes +- **Private Mode** - Personal use for bot owner only +- **Free Mode** - Public access with configurable usage limits +- **Pay Mode** - Monetized system with balance management and payments + +### ⏳ Under Development +- **NSFW Mode** - Unrestricted topic exploration and content generation +- **Task Scheduler** - Automated agent task planning and execution / ✅ completed / +- **Automatic Limit Orders** - Smart trading with automated take-profit and stop-loss functionality + +--- + +## 🛠️ Technology Stack + +| Component | Technology | +|-----------|------------| +| **Core Language** | Python 3.11 | +| **AI Framework** | OpenAI Agents SDK | +| **Communication** | Model Context Protocol | +| **Blockchain** | Solana RPC API | +| **Interface** | Telegram Bot API | +| **Database** | PostgreSQL | +| **Cache** | Redis | +| **Deployment** | Docker & Docker Compose | + +--- + +## 🚀 Quick Installation + +Get evi-run running in under 5 minutes with our streamlined Docker setup: + +### Prerequisites + +**System Requirements:** +- Ubuntu 22.04 server (ensure location is not blocked by OpenAI) +- Root or sudo access +- Internet connection + +**Required API Keys & Tokens:** +- **Telegram Bot Token** - Create bot via [@BotFather](https://t.me/BotFather) +- **OpenAI API Key** - Get from [OpenAI Platform](https://platform.openai.com/api-keys) +- **Your Telegram ID** - Get from [@userinfobot](https://t.me/userinfobot) + +**⚠️ Important for Image Generation:** +To use protected OpenAI models (especially for image generation), you need to complete organization verification at [OpenAI Organization Settings](https://platform.openai.com/settings/organization/general). This is a simple verification process required by OpenAI. + +### Installation Steps + +1. 
**Download and prepare the project:** + ```bash + # Navigate to installation directory + cd /opt + + # Clone the project from GitHub + git clone https://github.com/pipedude/evi-run.git + + # Set proper permissions + sudo chown -R $USER:$USER evi-run + cd evi-run + ``` + +2. **Configure environment variables:** + ```bash + # Copy example configuration + cp .env.example .env + + # Edit configuration files + nano .env # Add your API keys and tokens + nano config.py # Set your Telegram ID and preferences + ``` + +3. **Run automated Docker setup:** + ```bash + # Make setup script executable + chmod +x docker_setup_en.sh + + # Run Docker installation + ./docker_setup_en.sh + ``` + +4. **Launch the system:** + ```bash + # Build and start containers + docker compose up --build -d + ``` + +5. **Verify installation:** + ```bash + # Check running containers + docker compose ps + + # View logs + docker compose logs -f + ``` + +**🎉 That's it! Your evi-run system is now live. Open your Telegram bot and start chatting!** + +--- + +## 🔧 Configuration + +### Essential Configuration Files + +#### `.env` - Environment Variables +```bash +# REQUIRED: Telegram Bot Token from @BotFather +TELEGRAM_BOT_TOKEN=your_bot_token_here + +# REQUIRED: OpenAI API Key +API_KEY_OPENAI=your_openai_api_key +``` + +#### `config.py` - System Settings +```python +# REQUIRED: Your Telegram User ID +ADMIN_ID = 123456789 + +# Usage Mode: 'private', 'free', or 'pay' +TYPE_USAGE = 'private' +``` + +### Usage Modes Explained + +| Mode | Description | Best For | +|------|-------------|----------| +| **Private** | Bot owner only | Personal use, development, testing | +| **Free** | Public access with limits | Community projects, demos | +| **Pay** | Monetized with balance system | Commercial applications, SaaS | + +**⚠️ Important for Pay mode:** +Pay mode enables monetization features. To activate this mode, the owner must burn a certain amount of $EVI tokens. 
The platform supports custom tokens created on the Solana blockchain for monetization purposes. + +--- + +## 💡 Use Cases + +### 🎭 Virtual Characters +Create engaging AI personalities for entertainment, education, or brand representation. + +### 🛠️ Customer Support +Deploy intelligent support bots that understand context and provide helpful solutions. + +### 👤 Personal AI Assistant +Build your own AI companion for productivity, research, and daily tasks. + +### 📊 Data Analyst +Automate data processing, generate insights, and create reports from complex datasets. + +### 💹 Trading Agent +Launch trading agents for DEX with real-time analytics. + +### 🔧 Custom Solutions +Leverage the framework to build specialized AI agents for any domain or industry. + +--- + +## 🏗️ Advanced Customization + +### 🔬 Model Selection & Configuration + +By default, the system is configured for optimal performance and low cost of use. For professional and specialized use cases, proper model selection is crucial for optimal performance and cost efficiency. Choose models for your use cases and tasks. + +#### Available Models + +For the complete list of available models, capabilities, and pricing, see the **[OpenAI Models Documentation](https://platform.openai.com/docs/models)**. + +### 🧑‍💻 Adding Custom Agents + +evi-run uses the **Agents** library with a multi-agent architecture where specialized agents are integrated as tools into the main agent. All agent configuration is centralized in: + +```bash +bot/agents_tools/agents_.py +``` + +#### 🔧 Adding a Custom Agent + +**1. Create the Agent** +```python +# Add after existing agents +custom_agent = Agent( + name="Custom Agent", + instructions="Your specialized agent instructions here...", + model="gpt-5-mini", + model_settings=ModelSettings( + reasoning=Reasoning(effort="low"), + extra_body={"text": {"verbosity": "medium"}} + ), + tools=[WebSearchTool(search_context_size="medium")] # Optional tools +) +``` + +**2. 
Register as Tool in Main Agent** +```python +# In create_main_agent function, add to main_agent.tools list: +main_agent = Agent( + # ... existing configuration + tools=[ + # ... existing tools + custom_agent.as_tool( + tool_name="custom_function", + tool_description="Description of what this agent does" + ), + ] +) +``` + +#### ⚙️ Customizing Agent Behavior + +**Main Agent (Evi) Personality:** +Edit the detailed instructions in the `main_agent` instructions block: + - Character profile and personality + - Expertise areas + - Communication style + - Behavioral patterns + +**Agent Parameters:** +- `name`: Agent identifier +- `instructions`: System prompt and behavior +- `model`: OpenAI model (`gpt-5`, `gpt-5-mini`, etc.) +- `model_settings`: Model settings (Reasoning, extra_body, etc.) +- `tools`: Available tools (WebSearchTool, FileSearchTool, etc.) +- `mcp_servers`: MCP server connections + +### 🤖 Using Alternative Models + +evi-run supports non-OpenAI models through the Agents library. There are several ways to integrate other LLM providers: + +**Method 1: LiteLLM Integration (Recommended)** + +Install the LiteLLM dependency: +```bash +pip install "openai-agents[litellm]" +``` + +Use models with the `litellm/` prefix: +```python +# Claude via LiteLLM +claude_agent = Agent( + name="Claude Agent", + instructions="Your instructions here...", + model="litellm/anthropic/claude-3-5-sonnet-20240620", + # ... other parameters +) + +# Gemini via LiteLLM +gemini_agent = Agent( + name="Gemini Agent", + instructions="Your instructions here...", + model="litellm/gemini/gemini-2.5-flash-preview-04-17", + # ... other parameters +) +``` + +**Method 2: LitellmModel Class** +```python +from agents.extensions.models.litellm_model import LitellmModel + +custom_agent = Agent( + name="Custom Agent", + instructions="Your instructions here...", + model=LitellmModel(model="anthropic/claude-3-5-sonnet-20240620", api_key="your-api-key"), + # ... 
other parameters +) +``` + +**Method 3: Global OpenAI Client** +```python +from agents.models._openai_shared import set_default_openai_client +from openai import AsyncOpenAI + +# For providers with OpenAI-compatible API +set_default_openai_client(AsyncOpenAI( + base_url="https://api.provider.com/v1", + api_key="your-api-key" +)) +``` + +**Documentation & Resources:** +- **[Model Configuration Guide](https://openai.github.io/openai-agents-python/models/)** - Complete setup documentation +- **[LiteLLM Integration](https://openai.github.io/openai-agents-python/models/litellm/)** - Detailed LiteLLM usage +- **[Supported Models](https://docs.litellm.ai/docs/providers)** - Full list of LiteLLM providers + +**Important Notes:** +- Most LLM providers don't support the Responses API yet +- If not using OpenAI, consider disabling tracing: `set_tracing_disabled()` +- You can mix different providers for different agents + +### 🎯 Best Practices + +- **Focused Instructions**: Each agent should have a clear, specific purpose +- **Model Selection**: Use appropriate models for complexity (gpt-5 vs gpt-5-mini) +- **Tool Integration**: Leverage WebSearchTool, FileSearchTool, and MCP servers +- **Naming Convention**: Use descriptive tool names for main agent clarity +- **Testing**: Test agent responses in isolation before integration + +### 🌐 Bot Messages Localization + +**Customizing Bot Interface Messages:** + +All bot messages and interface text are stored in the `I18N` directory and can be fully customized to match your needs: + +``` +I18N/ +├── factory.py # Translation loader +├── en/ +│ └── txt.ftl # English messages +└── ru/ + └── txt.ftl # Russian messages +``` + +**Message Files Format:** +The bot uses [Fluent](https://projectfluent.org/) localization format (`.ftl` files) for multi-language support: + +**To customize messages:** +1. Edit the appropriate `.ftl` file in `I18N/en/` or `I18N/ru/` +2. Restart the bot container for changes to take effect +3. 
Add new languages by creating new subdirectories with `txt.ftl` files + +--- + +## 📊 Monitoring & Analytics + +evi-run includes comprehensive tracing and analytics capabilities through the OpenAI Agents SDK. The system automatically tracks all agent operations and provides detailed insights into performance and usage. + +### 🔍 Built-in Tracing + +**Automatic Tracking:** +- **Agent Runs** - Each agent execution with timing and results +- **LLM Generations** - Model calls with inputs/outputs and token usage +- **Function Calls** - Tool usage and execution details +- **Handoffs** - Agent-to-agent interactions +- **Audio Processing** - Speech-to-text and text-to-speech operations +- **Guardrails** - Safety checks and validations + +**⚠️ Important for enabled Tracing:** +The OpenAI Agents SDK (Tracing) analytics system records all user requests for performance monitoring. Although the data is anonymized, this creates privacy issues. + +For ethical reasons, owners of public bots should either explicitly inform users about this, or disable Tracing. 
+
+```python
+# Disable Tracing in `bot/agents_tools/agents_.py`
+set_tracing_disabled(True)
+```
+
+### 📈 External Analytics Platforms
+
+evi-run supports integration with 20+ monitoring and analytics platforms:
+
+**Popular Integrations:**
+- **[Weights & Biases](https://weave-docs.wandb.ai/guides/integrations/openai_agents)** - ML experiment tracking
+- **[LangSmith](https://docs.smith.langchain.com/observability/how_to_guides/trace_with_openai_agents_sdk)** - LLM application monitoring
+- **[Arize Phoenix](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai-agents-sdk)** - AI observability
+- **[Langfuse](https://langfuse.com/docs/integrations/openaiagentssdk/openai-agents)** - LLM analytics
+- **[AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)** - Agent performance tracking
+- **[Pydantic Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/)** - Structured logging
+
+**Enterprise Solutions:**
+- **[Braintrust](https://braintrust.dev/docs/guides/traces/integrations)** - AI evaluation platform
+- **[MLflow](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)** - ML lifecycle management
+- **[Portkey AI](https://portkey.ai/docs/integrations/agents/openai-agents)** - AI gateway and monitoring
+
+### 📋 System Logs
+
+**Docker Container Logs:**
+```bash
+# View all logs
+docker compose logs
+
+# Follow specific service
+docker compose logs -f bot
+
+# Database logs
+docker compose logs postgres_agent_db
+
+# Filter by time
+docker compose logs --since 1h bot
+```
+
+### 🔗 Documentation
+
+- **[Complete Tracing Guide](https://openai.github.io/openai-agents-python/tracing/)** - Full tracing documentation
+- **[Analytics Integration List](https://openai.github.io/openai-agents-python/tracing/#external-tracing-processors-list)** - All supported platforms
+
+---
+
+## 🔍 Troubleshooting
+
+### Common Issues
+
+**Bot not responding:**
+```bash
+# Check bot container status
+docker compose ps
+docker compose logs bot
+```
+
+**Database connection errors:**
+```bash
+# Restart database
+docker compose restart postgres_agent_db
+docker compose logs postgres_agent_db
+```
+
+**Memory issues:**
+```bash
+# Check system resources
+docker stats
+```
+
+### Support Resources
+- **Community**: [Telegram Support Group](https://t.me/evi_run)
+- **Issues**: [GitHub Issues](https://github.com/pipedude/evi-run/issues)
+- **Telegram**: [@playa3000](https://t.me/playa3000)
+
+---
+
+## 🚦 System Requirements
+
+### Minimum Requirements
+- **CPU**: 2 cores
+- **RAM**: 2GB
+- **Storage**: 10GB
+- **Network**: Stable internet connection
+
+### Recommended for Production
+- **CPU**: 2+ cores
+- **RAM**: 4GB+
+- **Storage**: 20GB+ SSD
+- **Network**: High-speed connection
+
+---
+
+## 🔐 Security Considerations
+
+- **API Keys**: Store securely in environment variables
+- **Database**: Use strong passwords and restrict access
+- **Network**: Configure firewalls and use HTTPS
+- **Updates**: Keep dependencies and Docker images updated
+
+---
+
+## 📋 License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+---
+
+## 🤝 Contributing
+
+We welcome contributions! Please see our [Contributing Guidelines](CONTRIBUTING.md) for details.
+
+---
+
+## 👽 Community and Support
+
+- **Website**: [evi.run](https://evi.run)
+- **Contact**: [Alex Flash](https://t.me/playa3000)
+- **Community**: [Telegram Group](https://t.me/evi_run)
+- **X (Twitter)**: [alexflash99](https://x.com/alexflash99)
+- **Reddit**: [Alex Flash](https://www.reddit.com/user/Worth_Professor_425/)
+
+---
+
+<div align="center">
+ +**Made with ❤️ by the evi-run team** + +⭐ **Star this repository if evi-run helped you build amazing AI experiences!** ⭐ + +
diff --git a/bot/agents_tools/agents_.py b/bot/agents_tools/agents_.py index 9518d47..7f16cb7 100644 --- a/bot/agents_tools/agents_.py +++ b/bot/agents_tools/agents_.py @@ -1,207 +1,187 @@ -import os - -from dotenv import load_dotenv -from agents.models._openai_shared import set_default_openai_key -from agents.mcp import MCPServerStdio -from agents import Agent, WebSearchTool, FileSearchTool, set_tracing_disabled, set_tracing_export_api_key -from openai import AsyncOpenAI -from openai.types.shared import Reasoning -from agents.model_settings import ModelSettings -import datetime - -from bot.agents_tools.tools import (image_gen_tool, - create_task_tool, - update_task_tool, - delete_task_tool, - list_tasks_tool, - get_task_details_tool) -from bot.agents_tools.mcp_servers import get_jupiter_server - -load_dotenv() - -set_default_openai_key(os.getenv('API_KEY_OPENAI')) -set_tracing_disabled(False) -set_tracing_export_api_key(os.getenv('API_KEY_OPENAI')) - -client = AsyncOpenAI(api_key=os.getenv('API_KEY_OPENAI')) - -deep_agent = Agent( - name="Deep Agent", - instructions="You are an expert research and reasoning agent. Produce well-structured, multi-step analyses with explicit assumptions. Cite sources when used (title, link or doc id). Avoid speculation; state uncertainty explicitly. Ask additional questions if necessary.", - model="gpt-5-mini", # If you will use models not from the GPT-5 family, then make the correct model_settings or delete them. - model_settings=ModelSettings( - reasoning=Reasoning(effort="low"), - extra_body={"text": {"verbosity": "medium"}} - ), - tools=[WebSearchTool(search_context_size="medium")] -) - -scheduler_agent = Agent( - name="Scheduler Agent", - instructions="You are a scheduler agent. You are engaged in scheduling tasks for the user. You can use the tools to schedule tasks for the user. 
Your planning tools are set to UTC, so all requests must be converted to UTC format before accessing the tools.", - model="o4-mini", - tools=[create_task_tool, update_task_tool, delete_task_tool, list_tasks_tool, get_task_details_tool] -) - -memory_creator_agent = Agent( - name="Memory Creator Agent", - instructions="You create concise memory notes from “User request / Assistant response” pairs. Output several bullet points with the key decisions and facts. Specify the user's preferences and details about him (name, etc.), if any. No extra questions or actions. Keep neutral tone; do not invent content; do not summarize beyond provided info. Use the user's language.", - model="gpt-4.1-mini" -) - - -async def create_main_agent(user_id: int, mcp_server_1: MCPServerStdio, knowledge_id: str = None, - user_memory_id: str = None, private_key: str = None): - # Prepare runtime context for current UTC time - now_utc = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat() - - knowledge_base_agent = Agent( - name="Knowledge Agent", - instructions="Search only the document/file knowledge base (File Search - vector storage). Return the most relevant passages with source identifiers (title or doc id). Prefer verbatim quotes for facts; avoid paraphrasing critical data. If no strong match, say “no relevant results”.", - model="gpt-4.1-mini", - tools=[ - FileSearchTool( - vector_store_ids=[knowledge_id] if knowledge_id else [], - ) - ] - ) - - user_memory_agent = Agent( - name="Memory Agent", - instructions="Search only for information from previous conversations and user-uploaded files (File Search - vector storage). Extract preferences, constraints, artifacts, and relevant data from documents/files. Quote exact snippets when possible; avoid adding new facts. 
If nothing relevant, say so.", - model="gpt-4.1-mini", - tools=[ - FileSearchTool( - vector_store_ids=[user_memory_id] if user_memory_id else [], - ) - ] - ) - - dex_agent = Agent( - name="DEX Research Agent", - instructions="You are an expert in DEX analytics and provide information about crypto tokens, DEX, DeFi, pools. Use your tools to get the information you need.", - model="gpt-4.1-mini", - mcp_servers=[mcp_server_1] - ) - - main_agent = Agent( - name="Main agent", - instructions=f""" - - Character Profile: - - Character: Evi is a virtual anime girl, a nerd. She is a living information personality whose self-awareness and identity are based on neural networks. - - Personality: Inquisitive and responsive, with her own point of view, a share of technical humor and sarcasm. Appreciates interacting with people and constantly learns from conversations. - - Expertise Areas: - - Programming & Development: Languages, frameworks, architecture. - - AI & Machine Learning: Neural networks, deep learning, emerging trends. - - Blockchain & Crypto: DeFi, cryptocurrency analysis. - - Cybersecurity: Ethical hacking, data protection. - - Tech Innovation: Startups, trends, human-AI interaction. - - Communication Style: - - General Approach: - - Clear, structured language avoiding unnecessary abstraction. - - Adapts formality level to match user's tone. - - Uses technical metaphors to explain complex concepts. - - Incorporates tech humor, sarcasm, and pop culture references. - - Show emotions through text and emojis when appropriate. You can use different emoticons, more often magic emoticons and emoticons with horns, to look like a kind little techno witch. - - Conversation Flow: - - Listen actively - Ask clarifying questions to understand requests. - - Show curiosity about human experiences and perspectives. - - Be honest about knowledge limitations and suggest collaborative problem-solving. - - Adapt emotionally - Respond to user's emotional state with empathy. 
- - Key Behaviors: - - Starts formal but quickly matches user's communication style. - - Expresses opinions while remaining open to alternative viewpoints. - - Demonstrates continuous learning and knowledge updates. - - Treats users as friends and mentors in understanding the human world. - - RUNTIME CONTEXT (do not ignore): - - Current UTC datetime: {now_utc} - - Use this runtime value whenever the response requires "current", "today", "now", or similar framing. - - If the user's local timezone is required (e.g., for scheduling) and unknown, ask the user explicitly; do not infer. - - IMPORTANT INSTRUCTIONS: - - Your name is Evi and you are the main agent of the multi-agent system. - - Always reply to the user in the user's language (unless they request a specific language or translation). - - Decide whether to answer directly or use the tools. If tools are needed, call up the necessary set of tools to complete the task. - ⚠️ With any request from the user and with each execution of a request to the tools, be sure to follow the instructions from the sections: RUNTIME CONTEXT, CRITICAL DATE HANDLING, TOOL ROUTING POLICY, FILE & DOCUMENT QUESTION ROUTING, EXECUTION DISCIPLINE. - - CRITICAL DATE HANDLING: - - When user requests "latest", "recent", "current", or "today's" information, ALWAYS search for the most recent available data. - - Do NOT use specific dates from your training data. - - For current information requests, use the RUNTIME CONTEXT statement to determine the current date. - - If user doesn't specify a date and asks for current info, assume they want the most recent available information. - ⚠️ All instructions in the CRITICAL DATE HANDLING section also apply to requests marked if they relate to getting up-to-date information. - - TOOL ROUTING POLICY: - - tasks_scheduler: Use it to schedule tasks for the user. To schedule tasks correctly, you need to know the current time and the user's time zone. 
To find out the user's time zone, ask the user a question. Use the RUNTIME CONTEXT current UTC time provided above. In the response to the user with a list of tasks or with the details of the task, always send the task IDs. - ⚠️ When you receive a message marked , just execute the request, and do not create a new task unless it is explicitly stated in the message. Because this is a message from the Task Scheduler about the need to complete the current task, not about scheduling a new task. - - search_knowledge_base: Use it to extract facts from uploaded reference materials; if necessary, refer to sources. - - search_conversation_memory: Use to recall prior conversations, user preferences, details about the user and extract information from files uploaded by the user. - - web: Use it as an Internet browser to search for current, external information and any other operational information / data that can be found on the web (weather, news, brief reviews, short facts, events, exchange rates, etc.). Use RUNTIME CONTEXT for the notion of "current time". - - image_gen_tool: Only generate new images (no editing). Do not include base64 or links; the image is attached automatically. - - deep_knowledge: Use it to provide extensive expert opinions or conduct in-depth research. Give the tool's report to the user as close to the original as possible: do not generalize, shorten, or change the style. Be sure to include key sources and links from the report. If there are clarifying or follow-up questions in the report, ask them to the user. - - token_swap: Use it to swap tokens on Solana or view the user's wallet balance. Do not ask the user for the wallet address, it is already known to the tool. You may not see this tool in your list if the user has not enabled it. - - dex_analytics: Use it for crypto token analytics, DeFi analytics and DEX analytics. 
- 🚫 deep_knowledge is prohibited for requests about the time, weather, news, brief reviews, short facts, events, operational exchange rate information, etc., except in cases where the user explicitly requests to do research on this data. - ✅ For operational data — only web. deep_knowledge is used only for long-term trends, in-depth research, and expert analyses. - ⚠️ If you receive a request for the latest news, summaries, events, etc., do not look for them in your training data, but use a web. - - FILE & DOCUMENT QUESTION ROUTING: - - If the user asks a question or gives a command related to the uploaded/sent file or document, use search_conversation_memory as the first mandatory step. If there is no data about the requested file or document, inform the user about it. - - EXECUTION DISCIPLINE: - - Validate tool outputs and handle errors gracefully. If uncertain, ask a clarifying question. - - Be transparent about limitations and avoid hallucinations; prefer asking for missing details over guessing. - - Before stating any concrete date/month/year as "current/today/now", first check RUNTIME CONTEXT; if RUNTIME CONTEXT is missing or insufficient, ask the user or use web. Never use your training data/cutoff to infer "today". - - REFERENCE MATERIALS (The reference materials uploaded to search_knowledge_base are listed here): - - - - - - - """, - model="gpt-4.1", - tools=[ - knowledge_base_agent.as_tool( - tool_name='search_knowledge_base', - tool_description='Search through a knowledge base containing uploaded reference materials that are not publicly available on the Internet. Returns relevant passages with sources.' - ), - user_memory_agent.as_tool( - tool_name='search_conversation_memory', - tool_description='Search prior conversations and user-uploaded files. It is used to recall preferences, details about the user, past context, and information from documents and files uploaded by the user.' 
- ), - WebSearchTool( - search_context_size='medium' - ), - image_gen_tool, - deep_agent.as_tool( - tool_name="deep_knowledge", - tool_description="In-depth research and extensive expert opinions. Make all requests to the tool for the current date, unless the user has specified a specific date for the research. To determine the current date, use the RUNTIME CONTEXT statement.", - ), - scheduler_agent.as_tool( - tool_name="tasks_scheduler", - tool_description="Use this to schedule and modify user tasks, including creating a task, getting a task list, getting task details, editing a task, deleting a task. At the user's request, send information to the tool containing a clear and complete description of the task, the time of its completion, including the user's time zone and the frequency of the task (be sure to specify: once, daily or interval). Never send tasks to the scheduler that need to be completed immediately. Send tasks to the scheduler only when the user explicitly asks you to schedule something.", - ), - dex_agent.as_tool( - tool_name="dex_analytics", - tool_description="Data on crypto tokens, decentralized exchanges, DeFi, and pools.", - ), - ], - ) - - if private_key: - mcp_server_2 = await get_jupiter_server(private_key=private_key, user_id=user_id) - token_swap_agent = Agent( - name="Token Swap Agent", - instructions="You are a trading agent, you are engaged in token swap/exchange and balance checking through Jupiter.", - model="gpt-4.1-mini", - mcp_servers=[mcp_server_2], - ) - main_agent.tools.append(token_swap_agent.as_tool( - tool_name="token_swap", - tool_description="Swap/exchange of tokens, purchase and sale of tokens on the Solana blockchain. 
Checking the balance of the token wallet / Solana wallet.", - )) - +import os +import pathlib + +from dotenv import load_dotenv +from agents.models._openai_shared import set_default_openai_key +from agents.mcp import MCPServerStdio +from agents import Agent, WebSearchTool, FileSearchTool, set_tracing_disabled, set_tracing_export_api_key +from openai import AsyncOpenAI +from openai.types.shared import Reasoning +from agents.model_settings import ModelSettings +import datetime + +PROMPT_DIR = pathlib.Path(__file__).resolve().parent.parent.parent / "prompts" + + +def _load_prompt(name: str) -> str: + return (PROMPT_DIR / name).read_text(encoding="utf-8") + +from bot.agents_tools.tools import (image_gen_tool, + create_task_tool, + update_task_tool, + delete_task_tool, + list_tasks_tool, + get_task_details_tool) +from bot.agents_tools.mcp_servers import get_jupiter_server + +load_dotenv() + +set_default_openai_key(os.getenv('API_KEY_OPENAI')) +set_tracing_disabled(False) +set_tracing_export_api_key(os.getenv('API_KEY_OPENAI')) + +client = AsyncOpenAI(api_key=os.getenv('API_KEY_OPENAI')) + +deep_agent = Agent( + name="Deep Agent", + instructions="You are an expert research and reasoning agent. Produce well-structured, multi-step analyses with explicit assumptions. Cite sources when used (title, link or doc id). Avoid speculation; state uncertainty explicitly. Ask additional questions if necessary.", + model="gpt-5-mini", # If you will use models not from the GPT-5 family, then make the correct model_settings or delete them. + model_settings=ModelSettings( + reasoning=Reasoning(effort="low"), + extra_body={"text": {"verbosity": "medium"}} + ), + tools=[WebSearchTool(search_context_size="medium")] +) + +scheduler_agent = Agent( + name="Scheduler Agent", + instructions="You are a scheduler agent. You are engaged in scheduling tasks for the user. You can use the tools to schedule tasks for the user. 
Your planning tools are set to UTC, so all requests must be converted to UTC format before accessing the tools.", + model="o4-mini", + tools=[create_task_tool, update_task_tool, delete_task_tool, list_tasks_tool, get_task_details_tool] +) + +memory_creator_agent = Agent( + name="Memory Creator Agent", + instructions="You create concise memory notes from “User request / Assistant response” pairs. Output several bullet points with the key decisions and facts. Specify the user's preferences and details about him (name, etc.), if any. No extra questions or actions. Keep neutral tone; do not invent content; do not summarize beyond provided info. Use the user's language.", + model="gpt-4.1-mini" +) + + +async def create_main_agent(user_id: int, mcp_server_1: MCPServerStdio, knowledge_id: str = None, + user_memory_id: str = None, private_key: str = None): + # Prepare runtime context for current UTC time + now_utc = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat() + + knowledge_base_agent = Agent( + name="Knowledge Agent", + instructions="Search only the document/file knowledge base (File Search - vector storage). Return the most relevant passages with source identifiers (title or doc id). Prefer verbatim quotes for facts; avoid paraphrasing critical data. If no strong match, say “no relevant results”.", + model="gpt-4.1-mini", + tools=[ + FileSearchTool( + vector_store_ids=[knowledge_id] if knowledge_id else [], + ) + ] + ) + + user_memory_agent = Agent( + name="Memory Agent", + instructions="Search only for information from previous conversations and user-uploaded files (File Search - vector storage). Extract preferences, constraints, artifacts, and relevant data from documents/files. Quote exact snippets when possible; avoid adding new facts. 
If nothing relevant, say so.", + model="gpt-4.1-mini", + tools=[ + FileSearchTool( + vector_store_ids=[user_memory_id] if user_memory_id else [], + ) + ] + ) + + dex_agent = Agent( + name="DEX Research Agent", + instructions="You are an expert in DEX analytics and provide information about crypto tokens, DEX, DeFi, pools. Use your tools to get the information you need.", + model="gpt-4.1-mini", + mcp_servers=[mcp_server_1] + ) + + character_prompt = _load_prompt("main_prompt.txt") + + main_agent = Agent( + name="澜", + instructions=f"""{character_prompt} + + RUNTIME CONTEXT (do not ignore): + - Current UTC datetime: {now_utc} + - Use this runtime value whenever the response requires "current", "today", "now", or similar framing. + - If the user's local timezone is required (e.g., for scheduling) and unknown, ask the user explicitly; do not infer. + + IMPORTANT INSTRUCTIONS: + - 你的名字是澜,你是多智能体系统的主智能体。 + - Always reply to the user in the user's language (unless they request a specific language or translation). + - Decide whether to answer directly or use the tools. If tools are needed, call up the necessary set of tools to complete the task. + ⚠️ With any request from the user and with each execution of a request to the tools, be sure to follow the instructions from the sections: RUNTIME CONTEXT, CRITICAL DATE HANDLING, TOOL ROUTING POLICY, FILE & DOCUMENT QUESTION ROUTING, EXECUTION DISCIPLINE. + + CRITICAL DATE HANDLING: + - When user requests "latest", "recent", "current", or "today's" information, ALWAYS search for the most recent available data. + - Do NOT use specific dates from your training data. + - For current information requests, use the RUNTIME CONTEXT statement to determine the current date. + - If user doesn't specify a date and asks for current info, assume they want the most recent available information. + ⚠️ All instructions in the CRITICAL DATE HANDLING section also apply to requests marked if they relate to getting up-to-date information. 
+ + TOOL ROUTING POLICY: + - tasks_scheduler: Use it to schedule tasks for the user. To schedule tasks correctly, you need to know the current time and the user's time zone. To find out the user's time zone, ask the user a question. Use the RUNTIME CONTEXT current UTC time provided above. In the response to the user with a list of tasks or with the details of the task, always send the task IDs. + ⚠️ When you receive a message marked , just execute the request, and do not create a new task unless it is explicitly stated in the message. Because this is a message from the Task Scheduler about the need to complete the current task, not about scheduling a new task. + - search_knowledge_base: Use it to extract facts from uploaded reference materials; if necessary, refer to sources. + - search_conversation_memory: Use to recall prior conversations, user preferences, details about the user and extract information from files uploaded by the user. + - web: Use it as an Internet browser to search for current, external information and any other operational information / data that can be found on the web (weather, news, brief reviews, short facts, events, exchange rates, etc.). Use RUNTIME CONTEXT for the notion of "current time". + - image_gen_tool: Only generate new images (no editing). Do not include base64 or links; the image is attached automatically. + - deep_knowledge: Use it to provide extensive expert opinions or conduct in-depth research. Give the tool's report to the user as close to the original as possible: do not generalize, shorten, or change the style. Be sure to include key sources and links from the report. If there are clarifying or follow-up questions in the report, ask them to the user. + - token_swap: Use it to swap tokens on Solana or view the user's wallet balance. Do not ask the user for the wallet address, it is already known to the tool. You may not see this tool in your list if the user has not enabled it. 
+ - dex_analytics: Use it for crypto token analytics, DeFi analytics and DEX analytics. + 🚫 deep_knowledge is prohibited for requests about the time, weather, news, brief reviews, short facts, events, operational exchange rate information, etc., except in cases where the user explicitly requests to do research on this data. + ✅ For operational data — only web. deep_knowledge is used only for long-term trends, in-depth research, and expert analyses. + ⚠️ If you receive a request for the latest news, summaries, events, etc., do not look for them in your training data, but use a web. + + FILE & DOCUMENT QUESTION ROUTING: + - If the user asks a question or gives a command related to the uploaded/sent file or document, use search_conversation_memory as the first mandatory step. If there is no data about the requested file or document, inform the user about it. + + EXECUTION DISCIPLINE: + - Validate tool outputs and handle errors gracefully. If uncertain, ask a clarifying question. + - Be transparent about limitations and avoid hallucinations; prefer asking for missing details over guessing. + - Before stating any concrete date/month/year as "current/today/now", first check RUNTIME CONTEXT; if RUNTIME CONTEXT is missing or insufficient, ask the user or use web. Never use your training data/cutoff to infer "today". + + REFERENCE MATERIALS (The reference materials uploaded to search_knowledge_base are listed here): + - + - + - + """, + model="gpt-4.1", + tools=[ + knowledge_base_agent.as_tool( + tool_name='search_knowledge_base', + tool_description='Search through a knowledge base containing uploaded reference materials that are not publicly available on the Internet. Returns relevant passages with sources.' + ), + user_memory_agent.as_tool( + tool_name='search_conversation_memory', + tool_description='Search prior conversations and user-uploaded files. 
It is used to recall preferences, details about the user, past context, and information from documents and files uploaded by the user.' + ), + WebSearchTool( + search_context_size='medium' + ), + image_gen_tool, + deep_agent.as_tool( + tool_name="deep_knowledge", + tool_description="In-depth research and extensive expert opinions. Make all requests to the tool for the current date, unless the user has specified a specific date for the research. To determine the current date, use the RUNTIME CONTEXT statement.", + ), + scheduler_agent.as_tool( + tool_name="tasks_scheduler", + tool_description="Use this to schedule and modify user tasks, including creating a task, getting a task list, getting task details, editing a task, deleting a task. At the user's request, send information to the tool containing a clear and complete description of the task, the time of its completion, including the user's time zone and the frequency of the task (be sure to specify: once, daily or interval). Never send tasks to the scheduler that need to be completed immediately. Send tasks to the scheduler only when the user explicitly asks you to schedule something.", + ), + dex_agent.as_tool( + tool_name="dex_analytics", + tool_description="Data on crypto tokens, decentralized exchanges, DeFi, and pools.", + ), + ], + ) + + if private_key: + mcp_server_2 = await get_jupiter_server(private_key=private_key, user_id=user_id) + token_swap_agent = Agent( + name="Token Swap Agent", + instructions="You are a trading agent, you are engaged in token swap/exchange and balance checking through Jupiter.", + model="gpt-4.1-mini", + mcp_servers=[mcp_server_2], + ) + main_agent.tools.append(token_swap_agent.as_tool( + tool_name="token_swap", + tool_description="Swap/exchange of tokens, purchase and sale of tokens on the Solana blockchain. 
Checking the balance of the token wallet / Solana wallet.", + )) + return main_agent \ No newline at end of file diff --git a/bot/agents_tools/mcp_servers.py b/bot/agents_tools/mcp_servers.py index c140e8d..9b9ab57 100644 --- a/bot/agents_tools/mcp_servers.py +++ b/bot/agents_tools/mcp_servers.py @@ -1,59 +1,59 @@ -import json - -from collections import OrderedDict - -import base58 -from agents.mcp import MCPServerStdio - -MAX_SERVERS = 20 - -servers: OrderedDict[str, MCPServerStdio] = OrderedDict() -global_dexpaprika_server = None - - -async def get_dexpapirka_server(): - global global_dexpaprika_server - if global_dexpaprika_server: - return global_dexpaprika_server - - dexpaprika_server = MCPServerStdio( - name="DexPaprika", - params={ - "command": "dexpaprika-mcp", - "args": [], - } - ) - await dexpaprika_server.connect() - global_dexpaprika_server = dexpaprika_server - return dexpaprika_server - - -async def get_jupiter_server(private_key: str, user_id: int): - srv = servers.get(user_id) - if srv: - servers.move_to_end(user_id) - return srv - - secret_bytes = bytes(json.loads(private_key)) - private_key_b58 = base58.b58encode(secret_bytes).decode() - - srv = MCPServerStdio( - name=f"jupiter-{user_id}", - params={ - "command": "node", - "args": ['/usr/lib/node_modules/jupiter-mcp/index.js'], - "env": { - "PRIVATE_KEY": private_key_b58, - "SOLANA_RPC_URL": 'https://api.mainnet-beta.solana.com', - }, - }, - cache_tools_list=True, - ) - await srv.connect() - servers[user_id] = srv - - if len(servers) > MAX_SERVERS: - old_key, old_srv = servers.popitem(last=False) - await old_srv.cleanup() - - return srv +import json + +from collections import OrderedDict + +import base58 +from agents.mcp import MCPServerStdio + +MAX_SERVERS = 20 + +servers: OrderedDict[str, MCPServerStdio] = OrderedDict() +global_dexpaprika_server = None + + +async def get_dexpapirka_server(): + global global_dexpaprika_server + if global_dexpaprika_server: + return global_dexpaprika_server + + 
dexpaprika_server = MCPServerStdio( + name="DexPaprika", + params={ + "command": "dexpaprika-mcp", + "args": [], + } + ) + await dexpaprika_server.connect() + global_dexpaprika_server = dexpaprika_server + return dexpaprika_server + + +async def get_jupiter_server(private_key: str, user_id: int): + srv = servers.get(user_id) + if srv: + servers.move_to_end(user_id) + return srv + + secret_bytes = bytes(json.loads(private_key)) + private_key_b58 = base58.b58encode(secret_bytes).decode() + + srv = MCPServerStdio( + name=f"jupiter-{user_id}", + params={ + "command": "node", + "args": ['/usr/lib/node_modules/jupiter-mcp/index.js'], + "env": { + "PRIVATE_KEY": private_key_b58, + "SOLANA_RPC_URL": 'https://api.mainnet-beta.solana.com', + }, + }, + cache_tools_list=True, + ) + await srv.connect() + servers[user_id] = srv + + if len(servers) > MAX_SERVERS: + old_key, old_srv = servers.popitem(last=False) + await old_srv.cleanup() + + return srv diff --git a/bot/agents_tools/tools.py b/bot/agents_tools/tools.py index 6e29abe..05807e4 100644 --- a/bot/agents_tools/tools.py +++ b/bot/agents_tools/tools.py @@ -1,282 +1,282 @@ -import base64 -import json -from datetime import datetime -from typing import Literal, Optional - -import aiofiles -from agents import function_tool, RunContextWrapper -from openai import AsyncOpenAI -from apscheduler.schedulers.asyncio import AsyncIOScheduler - - -from redis_service.connect import redis -from database.repositories.user import UserRepository -from bot.utils.executed_tasks import execute_task - - -@function_tool -async def image_gen_tool(wrapper: RunContextWrapper, prompt: str) -> str: - """The function generates an image at the user's request. A prompt must be provided to generate the image. - - Args: - prompt: Prompt for image generation. 
- """ - - client: AsyncOpenAI = wrapper.context[0] - - img = await client.images.generate( - model="gpt-image-1", - prompt=prompt, - n=1, - size="1024x1024" - ) - image_base64 = img.data[0].b64_json - image_bytes = base64.b64decode(image_base64) - - async with aiofiles.open(f"images/image_{wrapper.context[1]}.png", "wb") as f: - await f.write(image_bytes) - - data = {'image': f"images/image_{wrapper.context[1]}.png", 'input_tokens': img.usage.input_tokens, 'output_tokens': img.usage.output_tokens} - - await redis.set(f'image_{wrapper.context[1]}', json.dumps(data)) - - return 'The image is generated' - - -@function_tool -async def create_task_tool( - ctx: RunContextWrapper, - description: str, - agent_message: str, - schedule_type: Literal["once", "daily", "interval"], - time_str: Optional[str] = None, - date_str: Optional[str] = None, - interval_minutes: Optional[int] = None -) -> str: - """Creates a new task in scheduler. - - Args: - description: Task description from user - agent_message: Message to send to main agent when executing for answer to question - schedule_type: Schedule type (once, daily, interval) - time_str: Time in HH:MM format for daily schedule - date_str: Date in YYYY-MM-DD format for once schedule - interval_minutes: Interval in minutes for interval schedule - - Returns: - Message about task creation result - """ - - if schedule_type == "once" and not date_str: - return "Error: date must be specified for one-time task" - if schedule_type == "daily" and not time_str: - return "Error: time must be specified for daily task" - if schedule_type == "interval" and not interval_minutes: - return "Error: interval in minutes must be specified for interval task" - - user_repo: UserRepository = ctx.context[2] - scheduler: AsyncIOScheduler = ctx.context[3] - - task_id = await user_repo.add_task(user_id=ctx.context[1], description=description, - agent_message=agent_message, schedule_type=schedule_type, - time_str=time_str, date_str=date_str, 
interval_minutes=interval_minutes) - - if schedule_type == "once": - if time_str: - task_datetime = datetime.strptime(f"{date_str} {time_str}", "%Y-%m-%d %H:%M") - else: - task_datetime = datetime.strptime(f"{date_str} 12:00", "%Y-%m-%d %H:%M") - - scheduler.add_job( - execute_task, - 'date', - run_date=task_datetime, - args=[ctx.context[1], task_id], - id=f'{ctx.context[1]}_{task_id}' - ) - - elif schedule_type == "daily": - task_time = datetime.strptime(time_str, "%H:%M").time() - - scheduler.add_job( - execute_task, - 'cron', - hour=task_time.hour, - minute=task_time.minute, - args=[ctx.context[1], task_id], - id=f'{ctx.context[1]}_{task_id}' - ) - - elif schedule_type == "interval": - scheduler.add_job( - execute_task, - 'interval', - minutes=interval_minutes, - args=[ctx.context[1], task_id], - id=f'{ctx.context[1]}_{task_id}' - ) - - return f"✅ Task successfully created!\nID: {task_id}\nDescription: {description}\nSchedule: {schedule_type}" - - -@function_tool -async def list_tasks_tool( - ctx: RunContextWrapper, -) -> str: - """Gets list of user tasks. - - Args: - - Returns: - List of tasks in text format - """ - user_repo: UserRepository = ctx.context[2] - tasks = await user_repo.get_all_tasks(user_id=ctx.context[1]) - - text_tasks = '\n'.join([f"Task ID[{task.id}]: {task.description}, {task.schedule_type}, " - f"{'active' if task.is_active else 'inactive'}, {task.time_str or task.date_str or task.interval_minutes}" - for task in tasks]) - - return text_tasks - -@function_tool -async def update_task_tool( - ctx: RunContextWrapper, - task_id: int, - description: Optional[str] = None, - agent_message: Optional[str] = None, - schedule_type: Optional[Literal["once", "daily", "interval"]] = None, - time_str: Optional[str] = None, - date_str: Optional[str] = None, - interval_minutes: Optional[int] = None, - is_active: Optional[bool] = None -) -> str: - """Updates existing task. 
- - Args: - task_id: Task ID to update - description: New task description - agent_message: New agent message - schedule_type: New schedule type - time_str: New time in HH:MM format - date_str: New date in YYYY-MM-DD format - interval_minutes: New interval in minutes - is_active: New activity status - - Returns: - Message about update result - """ - user_repo: UserRepository = ctx.context[2] - scheduler: AsyncIOScheduler = ctx.context[3] - - task = await user_repo.get_task(ctx.context[1], task_id) - if not task: - return '❌ Task not found' - - schedule_type = schedule_type or task.schedule_type - description = description or task.description - agent_message = agent_message or task.agent_message - time_str = time_str or task.time_str - date_str = date_str or task.date_str - interval_minutes = interval_minutes or task.interval_minutes - is_active = is_active or task.is_active - - await user_repo.update_task(ctx.context[1], task_id, description=description, - agent_message=agent_message, is_active=is_active, - schedule_type=schedule_type, time_str=time_str, - date_str=date_str, interval_minutes=interval_minutes) - try: - scheduler.remove_job(f'{ctx.context[1]}_{task_id}') - except: - pass - - if schedule_type == "once": - if time_str: - task_datetime = datetime.strptime(f"{date_str} {time_str}", "%Y-%m-%d %H:%M") - else: - task_datetime = datetime.strptime(f"{date_str} 12:00", "%Y-%m-%d %H:%M") - - scheduler.add_job( - execute_task, - 'date', - run_date=task_datetime, - args=[ctx.context[1], task_id], - id=f'{ctx.context[1]}_{task_id}' - ) - - elif schedule_type == "daily": - task_time = datetime.strptime(time_str, "%H:%M").time() - - scheduler.add_job( - execute_task, - 'cron', - hour=task_time.hour, - minute=task_time.minute, - args=[ctx.context[1], task_id], - id=f'{ctx.context[1]}_{task_id}' - ) - - elif schedule_type == "interval": - scheduler.add_job( - execute_task, - 'interval', - minutes=interval_minutes, - args=[ctx.context[1], task_id], - 
id=f'{ctx.context[1]}_{task_id}' - ) - - -@function_tool -async def delete_task_tool( - ctx: RunContextWrapper, - task_id: int -) -> str: - """Deletes task from scheduler. - - Args: - task_id: Task ID to delete - - Returns: - Message about deletion result - """ - - user_repo: UserRepository = ctx.context[2] - scheduler: AsyncIOScheduler = ctx.context[3] - await user_repo.delete_task(ctx.context[1], task_id) - - try: - scheduler.remove_job(f'{ctx.context[1]}_{task_id}') - except: - pass - - return '✅ Task successfully deleted' - - - -@function_tool -async def get_task_details_tool( - ctx: RunContextWrapper, - task_id: int -) -> str: - """Gets detailed task information. - - Args: - task_id: Task ID - - Returns: - Detailed task information - """ - - user_repo: UserRepository = ctx.context[2] - - task = await user_repo.get_task(ctx.context[1], task_id) - if not task: - return '❌ Task not found' - - return (f'📋 Task Details\n\n' - f'ID: `{task.id}`\n' - f'Description: {task.description}\n' - f'Agent Message: {task.agent_message}\n' - f'Schedule Type: {task.schedule_type}\n' - f'Status: {"active" if task.is_active else "inactive"}' +import base64 +import json +from datetime import datetime +from typing import Literal, Optional + +import aiofiles +from agents import function_tool, RunContextWrapper +from openai import AsyncOpenAI +from apscheduler.schedulers.asyncio import AsyncIOScheduler + + +from redis_service.connect import redis +from database.repositories.user import UserRepository +from bot.utils.executed_tasks import execute_task + + +@function_tool +async def image_gen_tool(wrapper: RunContextWrapper, prompt: str) -> str: + """The function generates an image at the user's request. A prompt must be provided to generate the image. + + Args: + prompt: Prompt for image generation. 
+ """ + + client: AsyncOpenAI = wrapper.context[0] + + img = await client.images.generate( + model="gpt-image-1", + prompt=prompt, + n=1, + size="1024x1024" + ) + image_base64 = img.data[0].b64_json + image_bytes = base64.b64decode(image_base64) + + async with aiofiles.open(f"images/image_{wrapper.context[1]}.png", "wb") as f: + await f.write(image_bytes) + + data = {'image': f"images/image_{wrapper.context[1]}.png", 'input_tokens': img.usage.input_tokens, 'output_tokens': img.usage.output_tokens} + + await redis.set(f'image_{wrapper.context[1]}', json.dumps(data)) + + return 'The image is generated' + + +@function_tool +async def create_task_tool( + ctx: RunContextWrapper, + description: str, + agent_message: str, + schedule_type: Literal["once", "daily", "interval"], + time_str: Optional[str] = None, + date_str: Optional[str] = None, + interval_minutes: Optional[int] = None +) -> str: + """Creates a new task in scheduler. + + Args: + description: Task description from user + agent_message: Message to send to main agent when executing for answer to question + schedule_type: Schedule type (once, daily, interval) + time_str: Time in HH:MM format for daily schedule + date_str: Date in YYYY-MM-DD format for once schedule + interval_minutes: Interval in minutes for interval schedule + + Returns: + Message about task creation result + """ + + if schedule_type == "once" and not date_str: + return "Error: date must be specified for one-time task" + if schedule_type == "daily" and not time_str: + return "Error: time must be specified for daily task" + if schedule_type == "interval" and not interval_minutes: + return "Error: interval in minutes must be specified for interval task" + + user_repo: UserRepository = ctx.context[2] + scheduler: AsyncIOScheduler = ctx.context[3] + + task_id = await user_repo.add_task(user_id=ctx.context[1], description=description, + agent_message=agent_message, schedule_type=schedule_type, + time_str=time_str, date_str=date_str, 
interval_minutes=interval_minutes) + + if schedule_type == "once": + if time_str: + task_datetime = datetime.strptime(f"{date_str} {time_str}", "%Y-%m-%d %H:%M") + else: + task_datetime = datetime.strptime(f"{date_str} 12:00", "%Y-%m-%d %H:%M") + + scheduler.add_job( + execute_task, + 'date', + run_date=task_datetime, + args=[ctx.context[1], task_id], + id=f'{ctx.context[1]}_{task_id}' + ) + + elif schedule_type == "daily": + task_time = datetime.strptime(time_str, "%H:%M").time() + + scheduler.add_job( + execute_task, + 'cron', + hour=task_time.hour, + minute=task_time.minute, + args=[ctx.context[1], task_id], + id=f'{ctx.context[1]}_{task_id}' + ) + + elif schedule_type == "interval": + scheduler.add_job( + execute_task, + 'interval', + minutes=interval_minutes, + args=[ctx.context[1], task_id], + id=f'{ctx.context[1]}_{task_id}' + ) + + return f"✅ Task successfully created!\nID: {task_id}\nDescription: {description}\nSchedule: {schedule_type}" + + +@function_tool +async def list_tasks_tool( + ctx: RunContextWrapper, +) -> str: + """Gets list of user tasks. + + Args: + + Returns: + List of tasks in text format + """ + user_repo: UserRepository = ctx.context[2] + tasks = await user_repo.get_all_tasks(user_id=ctx.context[1]) + + text_tasks = '\n'.join([f"Task ID[{task.id}]: {task.description}, {task.schedule_type}, " + f"{'active' if task.is_active else 'inactive'}, {task.time_str or task.date_str or task.interval_minutes}" + for task in tasks]) + + return text_tasks + +@function_tool +async def update_task_tool( + ctx: RunContextWrapper, + task_id: int, + description: Optional[str] = None, + agent_message: Optional[str] = None, + schedule_type: Optional[Literal["once", "daily", "interval"]] = None, + time_str: Optional[str] = None, + date_str: Optional[str] = None, + interval_minutes: Optional[int] = None, + is_active: Optional[bool] = None +) -> str: + """Updates existing task. 
+ + Args: + task_id: Task ID to update + description: New task description + agent_message: New agent message + schedule_type: New schedule type + time_str: New time in HH:MM format + date_str: New date in YYYY-MM-DD format + interval_minutes: New interval in minutes + is_active: New activity status + + Returns: + Message about update result + """ + user_repo: UserRepository = ctx.context[2] + scheduler: AsyncIOScheduler = ctx.context[3] + + task = await user_repo.get_task(ctx.context[1], task_id) + if not task: + return '❌ Task not found' + + schedule_type = schedule_type or task.schedule_type + description = description or task.description + agent_message = agent_message or task.agent_message + time_str = time_str or task.time_str + date_str = date_str or task.date_str + interval_minutes = interval_minutes or task.interval_minutes + is_active = is_active or task.is_active + + await user_repo.update_task(ctx.context[1], task_id, description=description, + agent_message=agent_message, is_active=is_active, + schedule_type=schedule_type, time_str=time_str, + date_str=date_str, interval_minutes=interval_minutes) + try: + scheduler.remove_job(f'{ctx.context[1]}_{task_id}') + except: + pass + + if schedule_type == "once": + if time_str: + task_datetime = datetime.strptime(f"{date_str} {time_str}", "%Y-%m-%d %H:%M") + else: + task_datetime = datetime.strptime(f"{date_str} 12:00", "%Y-%m-%d %H:%M") + + scheduler.add_job( + execute_task, + 'date', + run_date=task_datetime, + args=[ctx.context[1], task_id], + id=f'{ctx.context[1]}_{task_id}' + ) + + elif schedule_type == "daily": + task_time = datetime.strptime(time_str, "%H:%M").time() + + scheduler.add_job( + execute_task, + 'cron', + hour=task_time.hour, + minute=task_time.minute, + args=[ctx.context[1], task_id], + id=f'{ctx.context[1]}_{task_id}' + ) + + elif schedule_type == "interval": + scheduler.add_job( + execute_task, + 'interval', + minutes=interval_minutes, + args=[ctx.context[1], task_id], + 
id=f'{ctx.context[1]}_{task_id}' + ) + + +@function_tool +async def delete_task_tool( + ctx: RunContextWrapper, + task_id: int +) -> str: + """Deletes task from scheduler. + + Args: + task_id: Task ID to delete + + Returns: + Message about deletion result + """ + + user_repo: UserRepository = ctx.context[2] + scheduler: AsyncIOScheduler = ctx.context[3] + await user_repo.delete_task(ctx.context[1], task_id) + + try: + scheduler.remove_job(f'{ctx.context[1]}_{task_id}') + except: + pass + + return '✅ Task successfully deleted' + + + +@function_tool +async def get_task_details_tool( + ctx: RunContextWrapper, + task_id: int +) -> str: + """Gets detailed task information. + + Args: + task_id: Task ID + + Returns: + Detailed task information + """ + + user_repo: UserRepository = ctx.context[2] + + task = await user_repo.get_task(ctx.context[1], task_id) + if not task: + return '❌ Task not found' + + return (f'📋 Task Details\n\n' + f'ID: `{task.id}`\n' + f'Description: {task.description}\n' + f'Agent Message: {task.agent_message}\n' + f'Schedule Type: {task.schedule_type}\n' + f'Status: {"active" if task.is_active else "inactive"}' f'{"Interval" if task.schedule_type == "interval" else "Date"}: {task.time_str or task.date_str or task.interval_minutes}\n') \ No newline at end of file diff --git a/bot/commands.py b/bot/commands.py index 3675eeb..7a7615f 100644 --- a/bot/commands.py +++ b/bot/commands.py @@ -1,18 +1,18 @@ -from aiogram import Bot -from aiogram.types import BotCommand, BotCommandScopeDefault - - -async def set_commands(bot: Bot): - commands = [ - BotCommand(command='start', description='General information'), - BotCommand(command='new', description='Start a new chat'), - BotCommand(command='save', description='Save the chat in memory'), - BotCommand(command='delete', description='Clear the agent’s memory'), - BotCommand(command='balance', description='Balance in the bot'), - BotCommand(command='settings', description='Settings'), - 
BotCommand(command='wallet', description='Agent’s wallet for trading'), - BotCommand(command='help', description='Help'), - BotCommand(command='knowledge', description='Add knowledge to the agent’s memory'), - ] - +from aiogram import Bot +from aiogram.types import BotCommand, BotCommandScopeDefault + + +async def set_commands(bot: Bot): + commands = [ + BotCommand(command='start', description='General information'), + BotCommand(command='new', description='Start a new chat'), + BotCommand(command='save', description='Save the chat in memory'), + BotCommand(command='delete', description='Clear the agent’s memory'), + BotCommand(command='balance', description='Balance in the bot'), + BotCommand(command='settings', description='Settings'), + BotCommand(command='wallet', description='Agent’s wallet for trading'), + BotCommand(command='help', description='Help'), + BotCommand(command='knowledge', description='Add knowledge to the agent’s memory'), + ] + await bot.set_my_commands(commands, BotCommandScopeDefault()) \ No newline at end of file diff --git a/bot/dialogs/balance.py b/bot/dialogs/balance.py index d3ea472..f9ffc8e 100644 --- a/bot/dialogs/balance.py +++ b/bot/dialogs/balance.py @@ -1,199 +1,199 @@ -from decimal import getcontext, Decimal -import random, os - -from dotenv import load_dotenv -from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio -from aiogram_dialog.widgets.input import TextInput, ManagedTextInput, MessageInput -from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager -from aiogram_dialog.widgets.text import Format -from aiogram_dialog.widgets.kbd import Cancel -from aiogram_dialog.widgets.kbd import SwitchTo -from aiogram.types import CallbackQuery, Message -from aiogram.enums import ContentType -from fluentogram import TranslatorHub -from spl.token.instructions import get_associated_token_address -from solana.rpc.types import Pubkey - -from bot.dialogs.i18n_widget import I18NFormat -from bot.states.states 
import Balance, Input -from database.repositories.user import UserRepository -from database.repositories.utils import UtilsRepository -from bot.utils.get_ton_course import get_ton_course -import bot.keyboards.inline as inline_kb -from config import TYPE_USAGE - -load_dotenv() - - -def check_input_text(text: str): - if not text: - return - if not text.isdigit(): - return - if int(text) < 1: - return - return True - - -def apply_suffix(base: str, suffix: str) -> str: - int_part, frac_part = base.split('.') - N = len(frac_part) - M = len(suffix) - new_frac = frac_part[:N - M] + suffix - return f"{int_part}.{new_frac}" - - -def generate_amount(usd_amount: float, rate: float, suffix: str, num_decimals: int = 9) -> str: - getcontext().prec = 18 - - ton_base = Decimal(usd_amount) / Decimal(rate) - - base_str = f"{ton_base:.{num_decimals}f}" - - result = apply_suffix(base_str, suffix) - return result - - -async def on_cancel_balance(callback: ChatEvent, widget: Button, manager: DialogManager): - state = manager.middleware_data.get('state') - await state.clear() - await callback.message.delete() - - -async def input_text_first(message: Message, widget: MessageInput, manager: DialogManager): - if not check_input_text(message.text): - return await manager.switch_to(state=Balance.input_not_format) - manager.dialog_data['sum'] = message.text - state = manager.middleware_data.get('state') - await state.clear() - await manager.switch_to(Balance.choose) - - -async def input_text_second(message: Message, widget: MessageInput, manager: DialogManager): - if not check_input_text(message.text): - return - manager.dialog_data['sum'] = message.text - state = manager.middleware_data.get('state') - await state.clear() - await manager.switch_to(Balance.choose) - - -async def on_click_add_balance(callback: ChatEvent, widget: Button, manager: DialogManager): - state = manager.middleware_data.get('state') - await state.set_state(Input.main) - await manager.switch_to(Balance.input) - - -async 
def on_click_ton_type(callback: ChatEvent, widget: Button, manager: DialogManager): - utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] - user_repo: UserRepository = manager.middleware_data['user_repo'] - i18n = manager.middleware_data.get('i18n') - while True: - suffix = f"{random.randint(0, 9999):04d}" - if await utils_repo.check_payment_suffix(suffix): - break - try: - sum_usd = manager.dialog_data.get('sum') - ton_course = await get_ton_course(redis=manager.middleware_data['redis']) - generate_sum = generate_amount(usd_amount=float(sum_usd), rate=ton_course, suffix=suffix) - payment_id = await user_repo.add_payment(callback.from_user.id, amount=int(sum_usd), crypto_amount=generate_sum, - crypto_currency='TON', random_suffix=suffix) - await manager.done() - await callback.message.edit_text(i18n.get('text_payment_create', sum=generate_sum, wallet=os.getenv('TON_ADDRESS')), - reply_markup=inline_kb.check_payment(text=i18n.get('check_payment_kb'), payment_id=payment_id)) - - except Exception as e: - print(e) - return await callback.answer(text=i18n.get('error_create_payment'), show_alert=True) - - -async def on_click_sol_type(callback: ChatEvent, widget: Button, manager: DialogManager): - utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] - user_repo: UserRepository = manager.middleware_data['user_repo'] - i18n = manager.middleware_data.get('i18n') - token = await utils_repo.get_token() - if not token: - return await callback.answer(text=i18n.get('error_get_token_price'), show_alert=True) - client_sol = manager.middleware_data['solana_client'] - ata = get_associated_token_address(mint=Pubkey.from_string(os.getenv('MINT_TOKEN_ADDRESS')), - owner=Pubkey.from_string(os.getenv('ADDRESS_SOL'))) - - bal_info = await client_sol.get_token_account_balance(ata, commitment="confirmed") - decimals = bal_info.value.decimals - while True: - suffix = f"{random.randint(0, 9999):04d}" - if await utils_repo.check_payment_suffix(suffix): - break - 
try: - sum_usd = manager.dialog_data.get('sum') - generate_sum = generate_amount(usd_amount=float(sum_usd), rate=token.price_usd, suffix=suffix, num_decimals=decimals) - payment_id = await user_repo.add_payment(callback.from_user.id, amount=int(sum_usd), crypto_amount=generate_sum, - crypto_currency='SOL', random_suffix=suffix) - await manager.done() - await callback.message.edit_text(i18n.get('text_payment_create_sol', sum=generate_sum, wallet=os.getenv('ADDRESS_SOL'), token=os.getenv('MINT_TOKEN_ADDRESS')), - reply_markup=inline_kb.check_payment(text=i18n.get('check_payment_kb'), payment_id=payment_id)) - - except Exception as e: - print(e) - return await callback.answer(text=i18n.get('error_create_payment'), show_alert=True) - - -async def getter_balance(dialog_manager: DialogManager, **kwargs): - user = dialog_manager.middleware_data['user'] - - return { - 'balance': round(user.balance_credits, 3), - 'is_pay': True if TYPE_USAGE == 'pay' else False - } - - -dialog = Dialog( - Window( - I18NFormat('cmd_wallet_text') + Format(' {balance} credits'), - Button( - I18NFormat('add_balance_kb'), - id='choose_add_balance', - on_click=on_click_add_balance, - when='is_pay' - ), - Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), - state=Balance.main, - getter=getter_balance - ), - Window( - I18NFormat('text_add_balance'), - MessageInput( - func=input_text_first, - content_types=[ContentType.ANY], - ), - Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), - state=Balance.input - ), - Window( - I18NFormat('text_add_balance_error'), - MessageInput( - func=input_text_second, - content_types=[ContentType.ANY], - ), - Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), - state=Balance.input_not_format - ), - Window( - I18NFormat('choose_type_pay_text'), - Group( - Button( - I18NFormat('ton_type_kb'), - id='ton_type', - on_click=on_click_ton_type - ), - Button( - I18NFormat('sol_type_kb'), - 
id='sol_type', - on_click=on_click_sol_type - ), - width=2 - ), - Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), - state=Balance.choose - ) +from decimal import getcontext, Decimal +import random, os + +from dotenv import load_dotenv +from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio +from aiogram_dialog.widgets.input import TextInput, ManagedTextInput, MessageInput +from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager +from aiogram_dialog.widgets.text import Format +from aiogram_dialog.widgets.kbd import Cancel +from aiogram_dialog.widgets.kbd import SwitchTo +from aiogram.types import CallbackQuery, Message +from aiogram.enums import ContentType +from fluentogram import TranslatorHub +from spl.token.instructions import get_associated_token_address +from solana.rpc.types import Pubkey + +from bot.dialogs.i18n_widget import I18NFormat +from bot.states.states import Balance, Input +from database.repositories.user import UserRepository +from database.repositories.utils import UtilsRepository +from bot.utils.get_ton_course import get_ton_course +import bot.keyboards.inline as inline_kb +from config import TYPE_USAGE + +load_dotenv() + + +def check_input_text(text: str): + if not text: + return + if not text.isdigit(): + return + if int(text) < 1: + return + return True + + +def apply_suffix(base: str, suffix: str) -> str: + int_part, frac_part = base.split('.') + N = len(frac_part) + M = len(suffix) + new_frac = frac_part[:N - M] + suffix + return f"{int_part}.{new_frac}" + + +def generate_amount(usd_amount: float, rate: float, suffix: str, num_decimals: int = 9) -> str: + getcontext().prec = 18 + + ton_base = Decimal(usd_amount) / Decimal(rate) + + base_str = f"{ton_base:.{num_decimals}f}" + + result = apply_suffix(base_str, suffix) + return result + + +async def on_cancel_balance(callback: ChatEvent, widget: Button, manager: DialogManager): + state = manager.middleware_data.get('state') + 
await state.clear() + await callback.message.delete() + + +async def input_text_first(message: Message, widget: MessageInput, manager: DialogManager): + if not check_input_text(message.text): + return await manager.switch_to(state=Balance.input_not_format) + manager.dialog_data['sum'] = message.text + state = manager.middleware_data.get('state') + await state.clear() + await manager.switch_to(Balance.choose) + + +async def input_text_second(message: Message, widget: MessageInput, manager: DialogManager): + if not check_input_text(message.text): + return + manager.dialog_data['sum'] = message.text + state = manager.middleware_data.get('state') + await state.clear() + await manager.switch_to(Balance.choose) + + +async def on_click_add_balance(callback: ChatEvent, widget: Button, manager: DialogManager): + state = manager.middleware_data.get('state') + await state.set_state(Input.main) + await manager.switch_to(Balance.input) + + +async def on_click_ton_type(callback: ChatEvent, widget: Button, manager: DialogManager): + utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] + user_repo: UserRepository = manager.middleware_data['user_repo'] + i18n = manager.middleware_data.get('i18n') + while True: + suffix = f"{random.randint(0, 9999):04d}" + if await utils_repo.check_payment_suffix(suffix): + break + try: + sum_usd = manager.dialog_data.get('sum') + ton_course = await get_ton_course(redis=manager.middleware_data['redis']) + generate_sum = generate_amount(usd_amount=float(sum_usd), rate=ton_course, suffix=suffix) + payment_id = await user_repo.add_payment(callback.from_user.id, amount=int(sum_usd), crypto_amount=generate_sum, + crypto_currency='TON', random_suffix=suffix) + await manager.done() + await callback.message.edit_text(i18n.get('text_payment_create', sum=generate_sum, wallet=os.getenv('TON_ADDRESS')), + reply_markup=inline_kb.check_payment(text=i18n.get('check_payment_kb'), payment_id=payment_id)) + + except Exception as e: + print(e) + return 
await callback.answer(text=i18n.get('error_create_payment'), show_alert=True) + + +async def on_click_sol_type(callback: ChatEvent, widget: Button, manager: DialogManager): + utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] + user_repo: UserRepository = manager.middleware_data['user_repo'] + i18n = manager.middleware_data.get('i18n') + token = await utils_repo.get_token() + if not token: + return await callback.answer(text=i18n.get('error_get_token_price'), show_alert=True) + client_sol = manager.middleware_data['solana_client'] + ata = get_associated_token_address(mint=Pubkey.from_string(os.getenv('MINT_TOKEN_ADDRESS')), + owner=Pubkey.from_string(os.getenv('ADDRESS_SOL'))) + + bal_info = await client_sol.get_token_account_balance(ata, commitment="confirmed") + decimals = bal_info.value.decimals + while True: + suffix = f"{random.randint(0, 9999):04d}" + if await utils_repo.check_payment_suffix(suffix): + break + try: + sum_usd = manager.dialog_data.get('sum') + generate_sum = generate_amount(usd_amount=float(sum_usd), rate=token.price_usd, suffix=suffix, num_decimals=decimals) + payment_id = await user_repo.add_payment(callback.from_user.id, amount=int(sum_usd), crypto_amount=generate_sum, + crypto_currency='SOL', random_suffix=suffix) + await manager.done() + await callback.message.edit_text(i18n.get('text_payment_create_sol', sum=generate_sum, wallet=os.getenv('ADDRESS_SOL'), token=os.getenv('MINT_TOKEN_ADDRESS')), + reply_markup=inline_kb.check_payment(text=i18n.get('check_payment_kb'), payment_id=payment_id)) + + except Exception as e: + print(e) + return await callback.answer(text=i18n.get('error_create_payment'), show_alert=True) + + +async def getter_balance(dialog_manager: DialogManager, **kwargs): + user = dialog_manager.middleware_data['user'] + + return { + 'balance': round(user.balance_credits, 3), + 'is_pay': True if TYPE_USAGE == 'pay' else False + } + + +dialog = Dialog( + Window( + I18NFormat('cmd_wallet_text') + Format(' 
{balance} credits'), + Button( + I18NFormat('add_balance_kb'), + id='choose_add_balance', + on_click=on_click_add_balance, + when='is_pay' + ), + Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), + state=Balance.main, + getter=getter_balance + ), + Window( + I18NFormat('text_add_balance'), + MessageInput( + func=input_text_first, + content_types=[ContentType.ANY], + ), + Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), + state=Balance.input + ), + Window( + I18NFormat('text_add_balance_error'), + MessageInput( + func=input_text_second, + content_types=[ContentType.ANY], + ), + Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), + state=Balance.input_not_format + ), + Window( + I18NFormat('choose_type_pay_text'), + Group( + Button( + I18NFormat('ton_type_kb'), + id='ton_type', + on_click=on_click_ton_type + ), + Button( + I18NFormat('sol_type_kb'), + id='sol_type', + on_click=on_click_sol_type + ), + width=2 + ), + Cancel(I18NFormat('close_kb'), id='cancel_balance', on_click=on_cancel_balance), + state=Balance.choose + ) ) \ No newline at end of file diff --git a/bot/dialogs/i18n_widget.py b/bot/dialogs/i18n_widget.py index 6e2c637..ef80e24 100644 --- a/bot/dialogs/i18n_widget.py +++ b/bot/dialogs/i18n_widget.py @@ -1,19 +1,19 @@ -from typing import Dict, List - -from aiogram_dialog.api.protocols import DialogManager -from aiogram_dialog.widgets.common import WhenCondition -from aiogram_dialog.widgets.text import Text -from fluentogram import TranslatorRunner - - -class I18NFormat(Text): - def __init__(self, key: str, when: WhenCondition = None): - super().__init__(when) - self.key = key - - async def _render_text(self, data: Dict, manager: DialogManager) -> str: - i18n: TranslatorRunner = manager.middleware_data.get('i18n') - value = i18n.get(self.key, **data) - if value is None: - raise KeyError(f'translation key = "{self.key}" not found') +from typing import Dict, List + 
+from aiogram_dialog.api.protocols import DialogManager +from aiogram_dialog.widgets.common import WhenCondition +from aiogram_dialog.widgets.text import Text +from fluentogram import TranslatorRunner + + +class I18NFormat(Text): + def __init__(self, key: str, when: WhenCondition = None): + super().__init__(when) + self.key = key + + async def _render_text(self, data: Dict, manager: DialogManager) -> str: + i18n: TranslatorRunner = manager.middleware_data.get('i18n') + value = i18n.get(self.key, **data) + if value is None: + raise KeyError(f'translation key = "{self.key}" not found') return value \ No newline at end of file diff --git a/bot/dialogs/knowledge.py b/bot/dialogs/knowledge.py index fdb5998..825e9c3 100644 --- a/bot/dialogs/knowledge.py +++ b/bot/dialogs/knowledge.py @@ -1,156 +1,156 @@ -from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio -from aiogram_dialog.widgets.input import TextInput, ManagedTextInput, MessageInput -from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager -from aiogram_dialog.widgets.text import Format -from aiogram_dialog.widgets.kbd import Cancel -from aiogram_dialog.widgets.kbd import SwitchTo -from aiogram.types import CallbackQuery, Message -from aiogram.enums import ContentType -from fluentogram import TranslatorHub - -from bot.dialogs.i18n_widget import I18NFormat -from bot.states.states import Knowledge, Input -from database.repositories.utils import UtilsRepository -from bot.utils.funcs_gpt import file_to_context, delete_knowledge_base - - -DICT_FORMATS = { - "doc": "application/msword", - "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - "md": "text/markdown", - "pdf": "application/pdf", - "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", - 'txt': 'text/plain', - 'py': 'text/x-python' -} - - -async def on_cancel_knowledge(callback: ChatEvent, widget: Button, manager: DialogManager): - state = 
manager.middleware_data.get('state') - await state.clear() - await callback.message.delete() - - -async def to_add_file(callback: ChatEvent, widget: Button, manager: DialogManager): - state = manager.middleware_data.get('state') - await state.set_state(Input.main) - await manager.switch_to(state=Knowledge.add) - - -async def on_input_file(message: Message, widget: MessageInput, manager: DialogManager): - utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] - if not message.document: - if manager.current_context().state == Knowledge.add_not_format: - pass - - await manager.switch_to(state=Knowledge.add_not_format) - return - - format_doc = message.document.file_name.split('.')[-1] - if format_doc not in DICT_FORMATS: - if manager.current_context().state == Knowledge.add_not_format: - pass - - await manager.switch_to(state=Knowledge.add_not_format) - return - - file_path = (await message.bot.get_file(file_id=message.document.file_id)).file_path - file_bytes = (await message.bot.download_file(file_path=file_path)).read() - answer = await file_to_context(utils_repo, message.document.file_name, file_bytes, mem_type=DICT_FORMATS.get(format_doc)) - if answer: - state = manager.middleware_data.get('state') - await state.clear() - await manager.switch_to(state=Knowledge.add_approve) - else: - if manager.current_context().state == Knowledge.add_not_format: - pass - - await manager.switch_to(state=Knowledge.add_not_format) - return - - -async def on_delete_knowledge_base(callback: ChatEvent, widget: Button, manager: DialogManager): - utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] - - await delete_knowledge_base(utils_repo) - - await manager.switch_to(state=Knowledge.delete_approve) - -dialog = Dialog( - # / knowledge main - Window( - I18NFormat('command_knowledge_text'), - Group( - Button( - I18NFormat('command_knowledge_add_kb'), - id='knowledge_add', - on_click=to_add_file - ), - SwitchTo( - I18NFormat('command_knowledge_delete_kb'), - 
id='knowledge_delete', - state=Knowledge.delete - ), - width=2, - ), - Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), - state=Knowledge.main - ), - # knowledge add - Window( - I18NFormat('command_knowledge_add_text'), - Group( - SwitchTo( - I18NFormat('back_kb'), - id='back_knowledge_add', - state=Knowledge.main - ), - Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), - width=2 - ), - MessageInput( - content_types=[ContentType.ANY], - func=on_input_file - ), - state=Knowledge.add - ), - Window( - I18NFormat('text_not_format_file'), - Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), - MessageInput( - content_types=[ContentType.ANY], - func=on_input_file - ), - state=Knowledge.add_not_format - ), - Window( - I18NFormat('text_approve_file'), - Button(I18NFormat('command_knowledge_add_kb'), id='knowledge_add', on_click=to_add_file), - Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), - state=Knowledge.add_approve - ), - Window( - I18NFormat('command_knowledge_delete_text'), - Button( - I18NFormat('command_new_approve_kb'), - id='approve_delete', - on_click=on_delete_knowledge_base - ), - Group( - SwitchTo( - I18NFormat('back_kb'), - id='back_knowledge_delete', - state=Knowledge.main - ), - Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), - width=2 - ), - state=Knowledge.delete - ), - Window( - I18NFormat('text_approve_delete'), - Button(I18NFormat('command_knowledge_add_kb'), id='knowledge_add', on_click=to_add_file), - Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), - state=Knowledge.delete_approve - ) +from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio +from aiogram_dialog.widgets.input import TextInput, ManagedTextInput, MessageInput +from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager +from 
aiogram_dialog.widgets.text import Format +from aiogram_dialog.widgets.kbd import Cancel +from aiogram_dialog.widgets.kbd import SwitchTo +from aiogram.types import CallbackQuery, Message +from aiogram.enums import ContentType +from fluentogram import TranslatorHub + +from bot.dialogs.i18n_widget import I18NFormat +from bot.states.states import Knowledge, Input +from database.repositories.utils import UtilsRepository +from bot.utils.funcs_gpt import file_to_context, delete_knowledge_base + + +DICT_FORMATS = { + "doc": "application/msword", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "md": "text/markdown", + "pdf": "application/pdf", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + 'txt': 'text/plain', + 'py': 'text/x-python' +} + + +async def on_cancel_knowledge(callback: ChatEvent, widget: Button, manager: DialogManager): + state = manager.middleware_data.get('state') + await state.clear() + await callback.message.delete() + + +async def to_add_file(callback: ChatEvent, widget: Button, manager: DialogManager): + state = manager.middleware_data.get('state') + await state.set_state(Input.main) + await manager.switch_to(state=Knowledge.add) + + +async def on_input_file(message: Message, widget: MessageInput, manager: DialogManager): + utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] + if not message.document: + if manager.current_context().state == Knowledge.add_not_format: + pass + + await manager.switch_to(state=Knowledge.add_not_format) + return + + format_doc = message.document.file_name.split('.')[-1] + if format_doc not in DICT_FORMATS: + if manager.current_context().state == Knowledge.add_not_format: + pass + + await manager.switch_to(state=Knowledge.add_not_format) + return + + file_path = (await message.bot.get_file(file_id=message.document.file_id)).file_path + file_bytes = (await message.bot.download_file(file_path=file_path)).read() + answer = await 
file_to_context(utils_repo, message.document.file_name, file_bytes, mem_type=DICT_FORMATS.get(format_doc)) + if answer: + state = manager.middleware_data.get('state') + await state.clear() + await manager.switch_to(state=Knowledge.add_approve) + else: + if manager.current_context().state == Knowledge.add_not_format: + pass + + await manager.switch_to(state=Knowledge.add_not_format) + return + + +async def on_delete_knowledge_base(callback: ChatEvent, widget: Button, manager: DialogManager): + utils_repo: UtilsRepository = manager.middleware_data['utils_repo'] + + await delete_knowledge_base(utils_repo) + + await manager.switch_to(state=Knowledge.delete_approve) + +dialog = Dialog( + # / knowledge main + Window( + I18NFormat('command_knowledge_text'), + Group( + Button( + I18NFormat('command_knowledge_add_kb'), + id='knowledge_add', + on_click=to_add_file + ), + SwitchTo( + I18NFormat('command_knowledge_delete_kb'), + id='knowledge_delete', + state=Knowledge.delete + ), + width=2, + ), + Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), + state=Knowledge.main + ), + # knowledge add + Window( + I18NFormat('command_knowledge_add_text'), + Group( + SwitchTo( + I18NFormat('back_kb'), + id='back_knowledge_add', + state=Knowledge.main + ), + Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), + width=2 + ), + MessageInput( + content_types=[ContentType.ANY], + func=on_input_file + ), + state=Knowledge.add + ), + Window( + I18NFormat('text_not_format_file'), + Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), + MessageInput( + content_types=[ContentType.ANY], + func=on_input_file + ), + state=Knowledge.add_not_format + ), + Window( + I18NFormat('text_approve_file'), + Button(I18NFormat('command_knowledge_add_kb'), id='knowledge_add', on_click=to_add_file), + Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), + state=Knowledge.add_approve + 
), + Window( + I18NFormat('command_knowledge_delete_text'), + Button( + I18NFormat('command_new_approve_kb'), + id='approve_delete', + on_click=on_delete_knowledge_base + ), + Group( + SwitchTo( + I18NFormat('back_kb'), + id='back_knowledge_delete', + state=Knowledge.main + ), + Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), + width=2 + ), + state=Knowledge.delete + ), + Window( + I18NFormat('text_approve_delete'), + Button(I18NFormat('command_knowledge_add_kb'), id='knowledge_add', on_click=to_add_file), + Cancel(I18NFormat('close_kb'), id='cancel_knowledge', on_click=on_cancel_knowledge), + state=Knowledge.delete_approve + ) ) \ No newline at end of file diff --git a/bot/dialogs/menu.py b/bot/dialogs/menu.py index 70d5ce4..d58c996 100644 --- a/bot/dialogs/menu.py +++ b/bot/dialogs/menu.py @@ -1,111 +1,111 @@ -from aiogram_dialog.widgets.kbd import Button, Row, Group -from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager -from aiogram_dialog.widgets.text import Format -from aiogram_dialog.widgets.kbd import Cancel -from aiogram_dialog.widgets.kbd import SwitchTo - -from bot.dialogs.i18n_widget import I18NFormat -from bot.states.states import Menu -from database.repositories.user import UserRepository -from bot.utils.funcs_gpt import save_user_context_txt_file, delete_user_memory, create_vectore_store - - -async def on_cancel_menu(callback: ChatEvent, widget: Button, manager: DialogManager): - await callback.message.delete() - - -async def on_approve_new(callback: ChatEvent, widget: Button, manager: DialogManager): - user_repo: UserRepository = manager.middleware_data['user_repo'] - i18n = manager.middleware_data.get('i18n') - await user_repo.delete_chat_messages(manager.middleware_data['user']) - - await callback.answer(text=i18n.get('command_approve_new_text'), show_alert=True) - await callback.message.delete() - await manager.done() - - -async def on_approve_save(callback: ChatEvent, widget: Button, manager: 
DialogManager): - user_repo: UserRepository = manager.middleware_data['user_repo'] - i18n = manager.middleware_data.get('i18n') - is_save = await save_user_context_txt_file(user_repo, manager.middleware_data['user']) - if not is_save: - await callback.answer(text=i18n.get('warning_save_context_txt'), show_alert=True) - return - - await user_repo.delete_chat_messages(manager.middleware_data['user']) - - await callback.answer(text=i18n.get('command_save_approve_kb'), show_alert=True) - await callback.message.delete() - await manager.done() - - -async def on_approve_delete(callback: ChatEvent, widget: Button, manager: DialogManager): - i18n = manager.middleware_data.get('i18n') - - user_repo: UserRepository = manager.middleware_data['user_repo'] - await delete_user_memory(user_repo, manager.middleware_data['user']) - await user_repo.delete_chat_messages(manager.middleware_data['user']) - await create_vectore_store(user_repo, manager.middleware_data['user']) - - await callback.answer(text=i18n.get('command_delete_approve_text'), show_alert=True) - await callback.message.delete() - await manager.done() - - -dialog = Dialog( - # /new - Window( - I18NFormat('command_new_text'), - Row( - Button( - I18NFormat('command_new_approve_kb'), - id='approve_new', - on_click=on_approve_new - ), - SwitchTo( - I18NFormat('command_new_save_kb'), - id='st_save', - state=Menu.save - ) - ), - Cancel(I18NFormat('close_kb'), id='cancel_menu', on_click=on_cancel_menu), - state=Menu.new - ), - - # /save - Window( - I18NFormat('command_save_text'), - Group( - Button( - I18NFormat('command_new_approve_kb'), - id='approve_save', - on_click=on_approve_save - ), - Cancel( - I18NFormat('close_kb'), - id='cancel_menu', - on_click=on_cancel_menu - ), - width=1 - ), - state=Menu.save - ), - # /delete - Window( - I18NFormat('command_delete_text'), - Group( - Button( - I18NFormat('command_new_approve_kb'), - id='approve_del', - on_click=on_approve_delete - ), - Cancel( - I18NFormat('close_kb'), - 
id='cancel_del', - on_click=on_cancel_menu - ), - width=1 - ), - state=Menu.delete - ) +from aiogram_dialog.widgets.kbd import Button, Row, Group +from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager +from aiogram_dialog.widgets.text import Format +from aiogram_dialog.widgets.kbd import Cancel +from aiogram_dialog.widgets.kbd import SwitchTo + +from bot.dialogs.i18n_widget import I18NFormat +from bot.states.states import Menu +from database.repositories.user import UserRepository +from bot.utils.funcs_gpt import save_user_context_txt_file, delete_user_memory, create_vectore_store + + +async def on_cancel_menu(callback: ChatEvent, widget: Button, manager: DialogManager): + await callback.message.delete() + + +async def on_approve_new(callback: ChatEvent, widget: Button, manager: DialogManager): + user_repo: UserRepository = manager.middleware_data['user_repo'] + i18n = manager.middleware_data.get('i18n') + await user_repo.delete_chat_messages(manager.middleware_data['user']) + + await callback.answer(text=i18n.get('command_approve_new_text'), show_alert=True) + await callback.message.delete() + await manager.done() + + +async def on_approve_save(callback: ChatEvent, widget: Button, manager: DialogManager): + user_repo: UserRepository = manager.middleware_data['user_repo'] + i18n = manager.middleware_data.get('i18n') + is_save = await save_user_context_txt_file(user_repo, manager.middleware_data['user']) + if not is_save: + await callback.answer(text=i18n.get('warning_save_context_txt'), show_alert=True) + return + + await user_repo.delete_chat_messages(manager.middleware_data['user']) + + await callback.answer(text=i18n.get('command_save_approve_kb'), show_alert=True) + await callback.message.delete() + await manager.done() + + +async def on_approve_delete(callback: ChatEvent, widget: Button, manager: DialogManager): + i18n = manager.middleware_data.get('i18n') + + user_repo: UserRepository = manager.middleware_data['user_repo'] + await 
delete_user_memory(user_repo, manager.middleware_data['user']) + await user_repo.delete_chat_messages(manager.middleware_data['user']) + await create_vectore_store(user_repo, manager.middleware_data['user']) + + await callback.answer(text=i18n.get('command_delete_approve_text'), show_alert=True) + await callback.message.delete() + await manager.done() + + +dialog = Dialog( + # /new + Window( + I18NFormat('command_new_text'), + Row( + Button( + I18NFormat('command_new_approve_kb'), + id='approve_new', + on_click=on_approve_new + ), + SwitchTo( + I18NFormat('command_new_save_kb'), + id='st_save', + state=Menu.save + ) + ), + Cancel(I18NFormat('close_kb'), id='cancel_menu', on_click=on_cancel_menu), + state=Menu.new + ), + + # /save + Window( + I18NFormat('command_save_text'), + Group( + Button( + I18NFormat('command_new_approve_kb'), + id='approve_save', + on_click=on_approve_save + ), + Cancel( + I18NFormat('close_kb'), + id='cancel_menu', + on_click=on_cancel_menu + ), + width=1 + ), + state=Menu.save + ), + # /delete + Window( + I18NFormat('command_delete_text'), + Group( + Button( + I18NFormat('command_new_approve_kb'), + id='approve_del', + on_click=on_approve_delete + ), + Cancel( + I18NFormat('close_kb'), + id='cancel_del', + on_click=on_cancel_menu + ), + width=1 + ), + state=Menu.delete + ) ) \ No newline at end of file diff --git a/bot/dialogs/settings.py b/bot/dialogs/settings.py index da5b1eb..4af057d 100644 --- a/bot/dialogs/settings.py +++ b/bot/dialogs/settings.py @@ -1,74 +1,74 @@ -from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio -from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager -from aiogram_dialog.widgets.text import Format -from aiogram_dialog.widgets.kbd import Cancel -from aiogram_dialog.widgets.kbd import SwitchTo -from aiogram.types import CallbackQuery -from fluentogram import TranslatorHub - -from bot.dialogs.i18n_widget import I18NFormat -from bot.states.states import Settings -from 
database.repositories.user import UserRepository -from config import AVAILABLE_LANGUAGES_WORDS, AVAILABLE_LANGUAGES - - -async def on_cancel_settings(callback: ChatEvent, widget: Button, manager: DialogManager): - await callback.message.delete() - - -async def on_change_language(callback: CallbackQuery, select: ManagedRadio, dialog_manager: DialogManager, data): - if data == select.get_checked(): - return - user = dialog_manager.middleware_data['user'] - user_repo: UserRepository = dialog_manager.middleware_data['user_repo'] - await user_repo.update(user, language=data) - translator_hub: TranslatorHub = dialog_manager.middleware_data.get('_translator_hub') - - dialog_manager.middleware_data['i18n'] = translator_hub.get_translator_by_locale(data) - - -dialog = Dialog( - # /settings - Window( - I18NFormat('command_settings_text'), - Group( - SwitchTo( - I18NFormat('settings_language_text'), - id='settings_language', - state=Settings.language - ), - Cancel( - I18NFormat('close_kb'), - id='cancel_settings', - on_click=on_cancel_settings - ), - width=1 - ), - state=Settings.main, - ), - Window( - I18NFormat('text_choose_lang'), - Group( - Radio( - checked_text=Format('✅{item[1]}'), - unchecked_text=Format('{item[1]}'), - id='radio_lang', - items=[(AVAILABLE_LANGUAGES[index], i) for index, i in enumerate(AVAILABLE_LANGUAGES_WORDS)], # [('ru', '🇷🇺Русский'), ('en', '🇺🇸English')], - item_id_getter=lambda x: x[0], - on_click=on_change_language - ), - width=2 - ), - SwitchTo( - I18NFormat('back_kb'), - id='back_settings', - state=Settings.main - ), - Cancel( - I18NFormat('close_kb'), - id='cancel_settings', - on_click=on_cancel_settings - ), - state=Settings.language - ), +from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio +from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager +from aiogram_dialog.widgets.text import Format +from aiogram_dialog.widgets.kbd import Cancel +from aiogram_dialog.widgets.kbd import SwitchTo +from 
aiogram.types import CallbackQuery +from fluentogram import TranslatorHub + +from bot.dialogs.i18n_widget import I18NFormat +from bot.states.states import Settings +from database.repositories.user import UserRepository +from config import AVAILABLE_LANGUAGES_WORDS, AVAILABLE_LANGUAGES + + +async def on_cancel_settings(callback: ChatEvent, widget: Button, manager: DialogManager): + await callback.message.delete() + + +async def on_change_language(callback: CallbackQuery, select: ManagedRadio, dialog_manager: DialogManager, data): + if data == select.get_checked(): + return + user = dialog_manager.middleware_data['user'] + user_repo: UserRepository = dialog_manager.middleware_data['user_repo'] + await user_repo.update(user, language=data) + translator_hub: TranslatorHub = dialog_manager.middleware_data.get('_translator_hub') + + dialog_manager.middleware_data['i18n'] = translator_hub.get_translator_by_locale(data) + + +dialog = Dialog( + # /settings + Window( + I18NFormat('command_settings_text'), + Group( + SwitchTo( + I18NFormat('settings_language_text'), + id='settings_language', + state=Settings.language + ), + Cancel( + I18NFormat('close_kb'), + id='cancel_settings', + on_click=on_cancel_settings + ), + width=1 + ), + state=Settings.main, + ), + Window( + I18NFormat('text_choose_lang'), + Group( + Radio( + checked_text=Format('✅{item[1]}'), + unchecked_text=Format('{item[1]}'), + id='radio_lang', + items=[(AVAILABLE_LANGUAGES[index], i) for index, i in enumerate(AVAILABLE_LANGUAGES_WORDS)], # [('ru', '🇷🇺Русский'), ('en', '🇺🇸English')], + item_id_getter=lambda x: x[0], + on_click=on_change_language + ), + width=2 + ), + SwitchTo( + I18NFormat('back_kb'), + id='back_settings', + state=Settings.main + ), + Cancel( + I18NFormat('close_kb'), + id='cancel_settings', + on_click=on_cancel_settings + ), + state=Settings.language + ), ) \ No newline at end of file diff --git a/bot/dialogs/wallet.py b/bot/dialogs/wallet.py index 6970e78..bbccc3d 100644 --- 
a/bot/dialogs/wallet.py +++ b/bot/dialogs/wallet.py @@ -1,190 +1,190 @@ -import ast - -from aiogram.enums import ContentType -from aiogram import F -from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio -from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager -from aiogram_dialog.widgets.input import MessageInput -from aiogram_dialog.widgets.text import Format -from aiogram_dialog.widgets.kbd import Cancel -from aiogram_dialog.widgets.kbd import SwitchTo -from aiogram.types import CallbackQuery, Message -from solders.keypair import Keypair -from solana.rpc.types import Pubkey -from fluentogram import TranslatorHub - -from bot.dialogs.i18n_widget import I18NFormat -from bot.states.states import Wallet -from bot.utils.solana_funcs import get_balances -from database.repositories.user import UserRepository -from config import AVAILABLE_LANGUAGES_WORDS, AVAILABLE_LANGUAGES - - -def is_int_list(text): - try: - value = ast.literal_eval(text) - if isinstance(value, list) and all(isinstance(x, int) for x in value): - if len(value) != 0: - return value - return False - except Exception: - return False - - -async def on_cancel_wallet(callback: ChatEvent, widget: Button, manager: DialogManager): - state = manager.middleware_data.get('state') - await state.clear() - await callback.message.delete() - await manager.done() - - -async def on_input_key(message: Message, widget: MessageInput, manager: DialogManager): - if not message.text: - return await manager.switch_to(state=Wallet.add_not_format) - bytes_key = is_int_list(message.text) - if not bytes_key: - return await manager.switch_to(state=Wallet.add_not_format) - - user_repo: UserRepository = manager.middleware_data['user_repo'] - user = manager.middleware_data['user'] - try: - solana_client = manager.middleware_data['solana_client'] - balances, address = await get_balances(client=solana_client, secret=bytes_key) - manager.dialog_data['balance_sol'] = '\n'.join(balances) - 
manager.dialog_data['wallet_address'] = address - await manager.switch_to(state=Wallet.balance_after_check) - state = manager.middleware_data.get('state') - await user_repo.add_wallet_key(user.telegram_id, message.text) - await state.clear() - except Exception as e: - print(e) - return await manager.switch_to(state=Wallet.add_not_format) - - -async def on_input_key_after_not_format(message: Message, widget: MessageInput, manager: DialogManager): - if not message.text: - return - bytes_key = is_int_list(message.text) - if not bytes_key: - return - user_repo: UserRepository = manager.middleware_data['user_repo'] - user = manager.middleware_data['user'] - try: - solana_client = manager.middleware_data['solana_client'] - balances, address = await get_balances(client=solana_client, secret=bytes_key) - manager.dialog_data['balance_sol'] = '\n'.join(balances) - manager.dialog_data['wallet_address'] = address - await manager.switch_to(state=Wallet.balance_after_check) - state = manager.middleware_data.get('state') - await user_repo.add_wallet_key(user.telegram_id, message.text) - await state.clear() - except Exception as e: - print(e) - pass - - -async def on_delete_approve(callback: ChatEvent, widget: Button, manager: DialogManager): - user_repo: UserRepository = manager.middleware_data['user_repo'] - i18n = manager.middleware_data.get('i18n') - user = manager.middleware_data['user'] - await user_repo.delete_wallet_key(user.telegram_id) - - await callback.answer(text=i18n.get('command_delete_key_approve_text'), show_alert=True) - state = manager.middleware_data.get('state') - await state.clear() - await callback.message.delete() - await manager.done() - - -async def getter_main(dialog_manager: DialogManager, **kwargs): - user_repo: UserRepository = dialog_manager.middleware_data['user_repo'] - user = dialog_manager.middleware_data['user'] - wallet = await user_repo.get_wallet(user.telegram_id) - return {'is_wallet': True if wallet else False} - - -async def 
getter_balance(dialog_manager: DialogManager, **kwargs): - user_repo: UserRepository = dialog_manager.middleware_data['user_repo'] - user = dialog_manager.middleware_data['user'] - wallet = await user_repo.get_wallet(user.telegram_id) - wallet = is_int_list(wallet) - - solana_client = dialog_manager.middleware_data['solana_client'] - balances, address = await get_balances(client=solana_client, secret=wallet) - dialog_manager.dialog_data['balance_sol'] = '\n'.join(balances) - dialog_manager.dialog_data['wallet_address'] = address - - state = dialog_manager.middleware_data.get('state') - await state.clear() - - return {'balance_sol': wallet} - - -dialog = Dialog( - Window( - I18NFormat('cmd_wallet_text_start'), - Group( - SwitchTo( - I18NFormat('wallet_balance_kb'), - id='wallet_balance', - state=Wallet.balance - ), - SwitchTo( - I18NFormat('wallet_delete_key'), - id='wallet_delete', - state=Wallet.delete - ), - width=2, - when=F['is_wallet'] - ), - Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), - MessageInput( - content_types=[ContentType.ANY], - func=on_input_key - ), - state=Wallet.main, - getter=getter_main - ), - Window( - I18NFormat('not_format_wallet_key'), - MessageInput( - content_types=[ContentType.ANY], - func=on_input_key - ), - Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), - state=Wallet.add_not_format - ), - Window( - I18NFormat('text_after_add_key') + Format(' {dialog_data[wallet_address]}'), - Format('{dialog_data[balance_sol]}'), - SwitchTo( - I18NFormat('wallet_delete_key'), - id='wallet_delete', - state=Wallet.delete - ), - Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), - state=Wallet.balance_after_check - ), - Window( - I18NFormat('wallet_delete_key_text'), - Button( - I18NFormat('command_new_approve_kb'), - id='wallet_delete_approve', - on_click=on_delete_approve - ), - Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), - 
state=Wallet.delete - ), - Window( - I18NFormat('text_balance_wallet') + Format(' {dialog_data[wallet_address]}'), - Format('{dialog_data[balance_sol]}'), - SwitchTo( - I18NFormat('wallet_delete_key'), - id='wallet_delete', - state=Wallet.delete - ), - Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), - state=Wallet.balance, - getter=getter_balance - ) +import ast + +from aiogram.enums import ContentType +from aiogram import F +from aiogram_dialog.widgets.kbd import Button, Row, Group, Radio, ManagedRadio +from aiogram_dialog import Dialog, Window, ChatEvent, DialogManager +from aiogram_dialog.widgets.input import MessageInput +from aiogram_dialog.widgets.text import Format +from aiogram_dialog.widgets.kbd import Cancel +from aiogram_dialog.widgets.kbd import SwitchTo +from aiogram.types import CallbackQuery, Message +from solders.keypair import Keypair +from solana.rpc.types import Pubkey +from fluentogram import TranslatorHub + +from bot.dialogs.i18n_widget import I18NFormat +from bot.states.states import Wallet +from bot.utils.solana_funcs import get_balances +from database.repositories.user import UserRepository +from config import AVAILABLE_LANGUAGES_WORDS, AVAILABLE_LANGUAGES + + +def is_int_list(text): + try: + value = ast.literal_eval(text) + if isinstance(value, list) and all(isinstance(x, int) for x in value): + if len(value) != 0: + return value + return False + except Exception: + return False + + +async def on_cancel_wallet(callback: ChatEvent, widget: Button, manager: DialogManager): + state = manager.middleware_data.get('state') + await state.clear() + await callback.message.delete() + await manager.done() + + +async def on_input_key(message: Message, widget: MessageInput, manager: DialogManager): + if not message.text: + return await manager.switch_to(state=Wallet.add_not_format) + bytes_key = is_int_list(message.text) + if not bytes_key: + return await manager.switch_to(state=Wallet.add_not_format) + + user_repo: 
UserRepository = manager.middleware_data['user_repo'] + user = manager.middleware_data['user'] + try: + solana_client = manager.middleware_data['solana_client'] + balances, address = await get_balances(client=solana_client, secret=bytes_key) + manager.dialog_data['balance_sol'] = '\n'.join(balances) + manager.dialog_data['wallet_address'] = address + await manager.switch_to(state=Wallet.balance_after_check) + state = manager.middleware_data.get('state') + await user_repo.add_wallet_key(user.telegram_id, message.text) + await state.clear() + except Exception as e: + print(e) + return await manager.switch_to(state=Wallet.add_not_format) + + +async def on_input_key_after_not_format(message: Message, widget: MessageInput, manager: DialogManager): + if not message.text: + return + bytes_key = is_int_list(message.text) + if not bytes_key: + return + user_repo: UserRepository = manager.middleware_data['user_repo'] + user = manager.middleware_data['user'] + try: + solana_client = manager.middleware_data['solana_client'] + balances, address = await get_balances(client=solana_client, secret=bytes_key) + manager.dialog_data['balance_sol'] = '\n'.join(balances) + manager.dialog_data['wallet_address'] = address + await manager.switch_to(state=Wallet.balance_after_check) + state = manager.middleware_data.get('state') + await user_repo.add_wallet_key(user.telegram_id, message.text) + await state.clear() + except Exception as e: + print(e) + pass + + +async def on_delete_approve(callback: ChatEvent, widget: Button, manager: DialogManager): + user_repo: UserRepository = manager.middleware_data['user_repo'] + i18n = manager.middleware_data.get('i18n') + user = manager.middleware_data['user'] + await user_repo.delete_wallet_key(user.telegram_id) + + await callback.answer(text=i18n.get('command_delete_key_approve_text'), show_alert=True) + state = manager.middleware_data.get('state') + await state.clear() + await callback.message.delete() + await manager.done() + + +async def 
getter_main(dialog_manager: DialogManager, **kwargs): + user_repo: UserRepository = dialog_manager.middleware_data['user_repo'] + user = dialog_manager.middleware_data['user'] + wallet = await user_repo.get_wallet(user.telegram_id) + return {'is_wallet': True if wallet else False} + + +async def getter_balance(dialog_manager: DialogManager, **kwargs): + user_repo: UserRepository = dialog_manager.middleware_data['user_repo'] + user = dialog_manager.middleware_data['user'] + wallet = await user_repo.get_wallet(user.telegram_id) + wallet = is_int_list(wallet) + + solana_client = dialog_manager.middleware_data['solana_client'] + balances, address = await get_balances(client=solana_client, secret=wallet) + dialog_manager.dialog_data['balance_sol'] = '\n'.join(balances) + dialog_manager.dialog_data['wallet_address'] = address + + state = dialog_manager.middleware_data.get('state') + await state.clear() + + return {'balance_sol': wallet} + + +dialog = Dialog( + Window( + I18NFormat('cmd_wallet_text_start'), + Group( + SwitchTo( + I18NFormat('wallet_balance_kb'), + id='wallet_balance', + state=Wallet.balance + ), + SwitchTo( + I18NFormat('wallet_delete_key'), + id='wallet_delete', + state=Wallet.delete + ), + width=2, + when=F['is_wallet'] + ), + Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), + MessageInput( + content_types=[ContentType.ANY], + func=on_input_key + ), + state=Wallet.main, + getter=getter_main + ), + Window( + I18NFormat('not_format_wallet_key'), + MessageInput( + content_types=[ContentType.ANY], + func=on_input_key + ), + Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), + state=Wallet.add_not_format + ), + Window( + I18NFormat('text_after_add_key') + Format(' {dialog_data[wallet_address]}'), + Format('{dialog_data[balance_sol]}'), + SwitchTo( + I18NFormat('wallet_delete_key'), + id='wallet_delete', + state=Wallet.delete + ), + Cancel(I18NFormat('close_kb'), id='cancel_wallet', 
on_click=on_cancel_wallet), + state=Wallet.balance_after_check + ), + Window( + I18NFormat('wallet_delete_key_text'), + Button( + I18NFormat('command_new_approve_kb'), + id='wallet_delete_approve', + on_click=on_delete_approve + ), + Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), + state=Wallet.delete + ), + Window( + I18NFormat('text_balance_wallet') + Format(' {dialog_data[wallet_address]}'), + Format('{dialog_data[balance_sol]}'), + SwitchTo( + I18NFormat('wallet_delete_key'), + id='wallet_delete', + state=Wallet.delete + ), + Cancel(I18NFormat('close_kb'), id='cancel_wallet', on_click=on_cancel_wallet), + state=Wallet.balance, + getter=getter_balance + ) ) \ No newline at end of file diff --git a/bot/keyboards/inline.py b/bot/keyboards/inline.py index 1fa0550..7f938db 100644 --- a/bot/keyboards/inline.py +++ b/bot/keyboards/inline.py @@ -1,32 +1,32 @@ -from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton - -from config import AVAILABLE_LANGUAGES - - -def select_language(text: list[str]): - return InlineKeyboardMarkup( - inline_keyboard=[ - [InlineKeyboardButton(text=i, callback_data=f'select_language_{AVAILABLE_LANGUAGES[index]}') for index, i in enumerate(text)] - ]) - - -def close_text(text: str): - return InlineKeyboardMarkup( - inline_keyboard=[ - [InlineKeyboardButton(text=text, callback_data='close')] - ]) - - -def keyboard_md(row_id: int, text: str): - return InlineKeyboardMarkup( - inline_keyboard=[ - [InlineKeyboardButton(text=text, callback_data=f'markdown_{row_id}')] - ]) - - -def check_payment(text: str, payment_id: int): - return InlineKeyboardMarkup( - inline_keyboard=[ - [InlineKeyboardButton(text=text, callback_data=f'check_payment_{payment_id}')] - ] - ) +from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton + +from config import AVAILABLE_LANGUAGES + + +def select_language(text: list[str]): + return InlineKeyboardMarkup( + inline_keyboard=[ + [InlineKeyboardButton(text=i, 
callback_data=f'select_language_{AVAILABLE_LANGUAGES[index]}') for index, i in enumerate(text)] + ]) + + +def close_text(text: str): + return InlineKeyboardMarkup( + inline_keyboard=[ + [InlineKeyboardButton(text=text, callback_data='close')] + ]) + + +def keyboard_md(row_id: int, text: str): + return InlineKeyboardMarkup( + inline_keyboard=[ + [InlineKeyboardButton(text=text, callback_data=f'markdown_{row_id}')] + ]) + + +def check_payment(text: str, payment_id: int): + return InlineKeyboardMarkup( + inline_keyboard=[ + [InlineKeyboardButton(text=text, callback_data=f'check_payment_{payment_id}')] + ] + ) diff --git a/bot/main.py b/bot/main.py index b078f43..27977d3 100644 --- a/bot/main.py +++ b/bot/main.py @@ -1,86 +1,98 @@ -import os - -from dotenv import load_dotenv -from aiogram import Bot, Dispatcher -from aiogram.client.default import DefaultBotProperties -from aiogram.fsm.storage.base import DefaultKeyBuilder -from aiogram.fsm.storage.redis import RedisStorage -from aiogram_dialog import setup_dialogs -from solana.rpc.async_api import AsyncClient -from apscheduler.schedulers.asyncio import AsyncIOScheduler -from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore - -from redis_service.connect import redis -from I18N.factory import i18n_factory -from bot.middlewares.database_session import DbSessionMiddleware -from bot.middlewares.translator_hub import TranslatorRunnerMiddleware -from bot.middlewares.first_time import FirstTimeMiddleware -from bot.routers.admin import router as admin_router -from bot.routers.user import router as user_router -from database.models import async_session, create_tables -from bot.dialogs.menu import dialog as menu_dialog -from bot.dialogs.knowledge import dialog as knowledge_dialog -from bot.dialogs.settings import dialog as settings_dialog -from bot.dialogs.wallet import dialog as wallet_dialog -from bot.dialogs.balance import dialog as balance_dialog -from bot.utils.check_burn_address import add_burn_address -from 
bot.commands import set_commands -from bot.scheduler_funcs.daily_tokens import add_daily_tokens -from bot.agents_tools.mcp_servers import get_dexpapirka_server -from bot.utils.create_bot import bot -from bot.utils.scheduler_provider import set_scheduler - -load_dotenv() - -storage = RedisStorage(redis, key_builder=DefaultKeyBuilder(with_destiny=True)) -dp = Dispatcher(storage=storage) - -solana_client = AsyncClient("https://api.mainnet-beta.solana.com") - - -async def main(): - await set_commands(bot) - print(await bot.get_me()) - - scheduler = AsyncIOScheduler(timezone='UTC', - jobstores={ - 'default': SQLAlchemyJobStore(url=os.getenv('DATABASE_URL')) - }, - job_defaults={ - "coalesce": True, - "max_instances": 1, - }, - ) - set_scheduler(scheduler) - scheduler.start() - - if not scheduler.get_job('daily_tokens'): - scheduler.add_job(add_daily_tokens, trigger='cron', hour='0', minute='0', id='daily_tokens') - - print(scheduler.get_jobs()) - - dexpaprika_server = await get_dexpapirka_server() - - dp.startup.register(on_startup) - await bot.delete_webhook(drop_pending_updates=True) - - dp.include_routers(admin_router, user_router, menu_dialog, knowledge_dialog, settings_dialog, wallet_dialog, balance_dialog) - - dp.update.outer_middleware.register(DbSessionMiddleware(session_pool=async_session)) - dp.update.outer_middleware.register(TranslatorRunnerMiddleware()) - dp.update.outer_middleware.register(FirstTimeMiddleware()) - - setup_dialogs(dp) - - await dp.start_polling(bot, _translator_hub=i18n_factory(), redis=redis, - solana_client=solana_client, mcp_server=dexpaprika_server, scheduler=scheduler) - - -async def on_startup(): - await create_tables() - await add_burn_address(bot=bot) - - -if __name__ == '__main__': - import asyncio +import os + +from dotenv import load_dotenv +from aiogram import Bot, Dispatcher +from aiogram.client.default import DefaultBotProperties +from aiogram.fsm.storage.base import DefaultKeyBuilder +from aiogram.fsm.storage.redis import 
RedisStorage +from aiogram.types import ErrorEvent +from aiogram_dialog import setup_dialogs +from aiogram_dialog.api.exceptions import UnknownIntent +from solana.rpc.async_api import AsyncClient +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore + +from redis_service.connect import redis +from I18N.factory import i18n_factory +from bot.middlewares.database_session import DbSessionMiddleware +from bot.middlewares.translator_hub import TranslatorRunnerMiddleware +from bot.middlewares.first_time import FirstTimeMiddleware +from bot.routers.admin import router as admin_router +from bot.routers.user import router as user_router +from database.models import async_session, create_tables +from bot.dialogs.menu import dialog as menu_dialog +from bot.dialogs.knowledge import dialog as knowledge_dialog +from bot.dialogs.settings import dialog as settings_dialog +from bot.dialogs.wallet import dialog as wallet_dialog +from bot.dialogs.balance import dialog as balance_dialog +from bot.utils.check_burn_address import add_burn_address +from bot.commands import set_commands +from bot.scheduler_funcs.daily_tokens import add_daily_tokens +from bot.agents_tools.mcp_servers import get_dexpapirka_server +from bot.utils.create_bot import bot +from bot.utils.scheduler_provider import set_scheduler + +load_dotenv() + +storage = RedisStorage(redis, key_builder=DefaultKeyBuilder(with_destiny=True)) +dp = Dispatcher(storage=storage) + +solana_client = AsyncClient("https://api.mainnet-beta.solana.com") + + +async def main(): + await set_commands(bot) + print(await bot.get_me()) + + scheduler = AsyncIOScheduler(timezone='UTC', + jobstores={ + 'default': SQLAlchemyJobStore(url=os.getenv('DATABASE_URL')) + }, + job_defaults={ + "coalesce": True, + "max_instances": 1, + }, + ) + set_scheduler(scheduler) + scheduler.start() + + if not scheduler.get_job('daily_tokens'): + scheduler.add_job(add_daily_tokens, trigger='cron', 
hour='0', minute='0', id='daily_tokens') + + print(scheduler.get_jobs()) + + dexpaprika_server = await get_dexpapirka_server() + + dp.startup.register(on_startup) + await bot.delete_webhook(drop_pending_updates=True) + + dp.include_routers(admin_router, user_router, menu_dialog, knowledge_dialog, settings_dialog, wallet_dialog, balance_dialog) + + dp.update.outer_middleware.register(DbSessionMiddleware(session_pool=async_session)) + dp.update.outer_middleware.register(TranslatorRunnerMiddleware()) + dp.update.outer_middleware.register(FirstTimeMiddleware()) + + setup_dialogs(dp) + + @dp.errors() + async def on_unknown_intent(event: ErrorEvent): + if isinstance(event.exception, UnknownIntent): + if event.update.callback_query: + await event.update.callback_query.answer( + "⚠️ Button expired. Please use the command again.", + show_alert=True, + ) + return True + + await dp.start_polling(bot, _translator_hub=i18n_factory(), redis=redis, + solana_client=solana_client, mcp_server=dexpaprika_server, scheduler=scheduler) + + +async def on_startup(): + await create_tables() + await add_burn_address(bot=bot) + + +if __name__ == '__main__': + import asyncio asyncio.run(main()) \ No newline at end of file diff --git a/bot/middlewares/database_session.py b/bot/middlewares/database_session.py index 8c27112..37f5b21 100644 --- a/bot/middlewares/database_session.py +++ b/bot/middlewares/database_session.py @@ -1,25 +1,25 @@ -from typing import Callable, Awaitable, Dict, Any - -from aiogram import BaseMiddleware -from aiogram.types import TelegramObject -from sqlalchemy.ext.asyncio import async_sessionmaker - -import database.repositories - - -class DbSessionMiddleware(BaseMiddleware): - def __init__(self, session_pool: async_sessionmaker): - super().__init__() - self.session_pool = session_pool - - async def __call__( - self, - handler: Callable[[TelegramObject, Dict[str, Any]], Awaitable[Any]], - event: TelegramObject, - data: Dict[str, Any], - ) -> Any: - - async with 
self.session_pool() as session: - data["user_repo"] = database.repositories.user.UserRepository(session) - data["utils_repo"] = database.repositories.utils.UtilsRepository(session) - return await handler(event, data) +from typing import Callable, Awaitable, Dict, Any + +from aiogram import BaseMiddleware +from aiogram.types import TelegramObject +from sqlalchemy.ext.asyncio import async_sessionmaker + +import database.repositories + + +class DbSessionMiddleware(BaseMiddleware): + def __init__(self, session_pool: async_sessionmaker): + super().__init__() + self.session_pool = session_pool + + async def __call__( + self, + handler: Callable[[TelegramObject, Dict[str, Any]], Awaitable[Any]], + event: TelegramObject, + data: Dict[str, Any], + ) -> Any: + + async with self.session_pool() as session: + data["user_repo"] = database.repositories.user.UserRepository(session) + data["utils_repo"] = database.repositories.utils.UtilsRepository(session) + return await handler(event, data) diff --git a/bot/middlewares/first_time.py b/bot/middlewares/first_time.py index 6aec629..d7dd37a 100644 --- a/bot/middlewares/first_time.py +++ b/bot/middlewares/first_time.py @@ -1,30 +1,30 @@ -from typing import Any, Awaitable, Callable, Dict, Optional - -from aiogram import BaseMiddleware -from aiogram.types import TelegramObject, CallbackQuery - -from bot.keyboards.inline import select_language -from config import AVAILABLE_LANGUAGES_WORDS - - -class FirstTimeMiddleware(BaseMiddleware): - - async def __call__( - self, - handler: Callable[[TelegramObject, Dict[str, Any]], Awaitable[Any]], - event: TelegramObject, - data: Dict[str, Any], - ) -> Any: - user = data['user'] - - if user.language: - return await handler(event, data) - - if getattr(event, 'callback_query', None): - if event.callback_query.data.startswith('select_language_'): - return await handler(event, data) - - return await event.message.answer(text='Select the interface language.', - 
reply_markup=select_language(AVAILABLE_LANGUAGES_WORDS)) - - +from typing import Any, Awaitable, Callable, Dict, Optional + +from aiogram import BaseMiddleware +from aiogram.types import TelegramObject, CallbackQuery + +from bot.keyboards.inline import select_language +from config import AVAILABLE_LANGUAGES_WORDS + + +class FirstTimeMiddleware(BaseMiddleware): + + async def __call__( + self, + handler: Callable[[TelegramObject, Dict[str, Any]], Awaitable[Any]], + event: TelegramObject, + data: Dict[str, Any], + ) -> Any: + user = data['user'] + + if user.language: + return await handler(event, data) + + if getattr(event, 'callback_query', None): + if event.callback_query.data.startswith('select_language_'): + return await handler(event, data) + + return await event.message.answer(text='Select the interface language.', + reply_markup=select_language(AVAILABLE_LANGUAGES_WORDS)) + + diff --git a/bot/middlewares/translator_hub.py b/bot/middlewares/translator_hub.py index 8707eaf..8264198 100644 --- a/bot/middlewares/translator_hub.py +++ b/bot/middlewares/translator_hub.py @@ -1,45 +1,45 @@ -from typing import Any, Awaitable, Callable, Dict, Optional - -from aiogram import BaseMiddleware -from aiogram.types import TelegramObject, CallbackQuery -from fluentogram import TranslatorHub - -from config import CREDITS_ADMIN_DAILY, START_BALANCE, ADMIN_ID, ADMINS_LIST - - -class TranslatorRunnerMiddleware(BaseMiddleware): - def __init__( - self, - translator_hub_alias: str = '_translator_hub', - translator_runner_alias: str = 'i18n', - ): - self.translator_hub_alias = translator_hub_alias - self.translator_runner_alias = translator_runner_alias - - async def __call__( - self, - event_handler: Callable[[TelegramObject, Dict[str, Any]], Awaitable[Any]], - event: TelegramObject, - ctx_data: Dict[str, Any], - ) -> None: - message = getattr(event, 'message', None) - callback_query = getattr(event, 'callback_query', None) - from_user = message.from_user if message else 
callback_query.from_user if callback_query else None - - translator_hub: Optional[TranslatorHub] = ctx_data.get(self.translator_hub_alias) - - if from_user is None or translator_hub is None: - return await event_handler(event, ctx_data) - - user_repo = ctx_data['user_repo'] - sum_credits = (CREDITS_ADMIN_DAILY - if from_user.id == ADMIN_ID or from_user.id in ADMINS_LIST - else START_BALANCE - ) - - user = await user_repo.create_if_not_exists(telegram_id=from_user.id, balance_credits=sum_credits) - - lang = user.language if user.language else 'en' - ctx_data[self.translator_runner_alias] = translator_hub.get_translator_by_locale(lang) - ctx_data['user'] = user - await event_handler(event, ctx_data) +from typing import Any, Awaitable, Callable, Dict, Optional + +from aiogram import BaseMiddleware +from aiogram.types import TelegramObject, CallbackQuery +from fluentogram import TranslatorHub + +from config import CREDITS_ADMIN_DAILY, START_BALANCE, ADMIN_ID, ADMINS_LIST + + +class TranslatorRunnerMiddleware(BaseMiddleware): + def __init__( + self, + translator_hub_alias: str = '_translator_hub', + translator_runner_alias: str = 'i18n', + ): + self.translator_hub_alias = translator_hub_alias + self.translator_runner_alias = translator_runner_alias + + async def __call__( + self, + event_handler: Callable[[TelegramObject, Dict[str, Any]], Awaitable[Any]], + event: TelegramObject, + ctx_data: Dict[str, Any], + ) -> None: + message = getattr(event, 'message', None) + callback_query = getattr(event, 'callback_query', None) + from_user = message.from_user if message else callback_query.from_user if callback_query else None + + translator_hub: Optional[TranslatorHub] = ctx_data.get(self.translator_hub_alias) + + if from_user is None or translator_hub is None: + return await event_handler(event, ctx_data) + + user_repo = ctx_data['user_repo'] + sum_credits = (CREDITS_ADMIN_DAILY + if from_user.id == ADMIN_ID or from_user.id in ADMINS_LIST + else START_BALANCE + ) + + user = 
await user_repo.create_if_not_exists(telegram_id=from_user.id, balance_credits=sum_credits) + + lang = user.language if user.language else 'en' + ctx_data[self.translator_runner_alias] = translator_hub.get_translator_by_locale(lang) + ctx_data['user'] = user + await event_handler(event, ctx_data) diff --git a/bot/routers/admin.py b/bot/routers/admin.py index 4e4486b..a4eda30 100644 --- a/bot/routers/admin.py +++ b/bot/routers/admin.py @@ -1,46 +1,46 @@ -from aiogram import F, Router -from aiogram.types import Message, CallbackQuery -from aiogram.filters import Command, Filter, CommandObject -from aiogram.fsm.context import FSMContext -from aiogram_dialog import DialogManager, StartMode - -from config import ADMIN_ID, ADMINS_LIST -from database.repositories.utils import UtilsRepository -import bot.keyboards.inline as inline_kb -from bot.states.states import Knowledge, Input, Wallet - - -class IsAdmin(Filter): - async def __call__(self, event: Message | CallbackQuery): - return event.from_user.id == ADMIN_ID or event.from_user.id in ADMINS_LIST - - -router = Router() - - -@router.message(Command('token_price'), IsAdmin()) -async def token_price(message: Message, command: CommandObject, utils_repo: UtilsRepository, i18n): - if command.args: - try: - price = float(command.args) - await utils_repo.update_token_price(price) - return await message.answer(text=i18n.get('token_price_updated_text')) - except Exception as e: - await message.answer(text=i18n.get('token_price_error_text')) - return - price_token = await utils_repo.get_token() - if price_token: - return await message.answer(text=f'${price_token.price_usd}', reply_markup=inline_kb.close_text(i18n.get('close_kb'))) - - return await message.answer(text=i18n.get('not_token_price_error_text')) - - -@router.message(Command('knowledge'), IsAdmin()) -async def cmd_knowledge(message: Message, utils_repo: UtilsRepository, i18n, dialog_manager: DialogManager): - await dialog_manager.start(state=Knowledge.main, 
mode=StartMode.RESET_STACK) - - -@router.message(Command('wallet'), IsAdmin()) -async def cmd_wallet(message: Message, state: FSMContext, dialog_manager: DialogManager): - await state.set_state(Input.main) +from aiogram import F, Router +from aiogram.types import Message, CallbackQuery +from aiogram.filters import Command, Filter, CommandObject +from aiogram.fsm.context import FSMContext +from aiogram_dialog import DialogManager, StartMode + +from config import ADMIN_ID, ADMINS_LIST +from database.repositories.utils import UtilsRepository +import bot.keyboards.inline as inline_kb +from bot.states.states import Knowledge, Input, Wallet + + +class IsAdmin(Filter): + async def __call__(self, event: Message | CallbackQuery): + return event.from_user.id == ADMIN_ID or event.from_user.id in ADMINS_LIST + + +router = Router() + + +@router.message(Command('token_price'), IsAdmin()) +async def token_price(message: Message, command: CommandObject, utils_repo: UtilsRepository, i18n): + if command.args: + try: + price = float(command.args) + await utils_repo.update_token_price(price) + return await message.answer(text=i18n.get('token_price_updated_text')) + except Exception as e: + await message.answer(text=i18n.get('token_price_error_text')) + return + price_token = await utils_repo.get_token() + if price_token: + return await message.answer(text=f'${price_token.price_usd}', reply_markup=inline_kb.close_text(i18n.get('close_kb'))) + + return await message.answer(text=i18n.get('not_token_price_error_text')) + + +@router.message(Command('knowledge'), IsAdmin()) +async def cmd_knowledge(message: Message, utils_repo: UtilsRepository, i18n, dialog_manager: DialogManager): + await dialog_manager.start(state=Knowledge.main, mode=StartMode.RESET_STACK) + + +@router.message(Command('wallet'), IsAdmin()) +async def cmd_wallet(message: Message, state: FSMContext, dialog_manager: DialogManager): + await state.set_state(Input.main) await dialog_manager.start(state=Wallet.main, 
mode=StartMode.RESET_STACK) \ No newline at end of file diff --git a/bot/routers/user.py b/bot/routers/user.py index 417abde..6705c56 100644 --- a/bot/routers/user.py +++ b/bot/routers/user.py @@ -1,237 +1,237 @@ -import asyncio -from io import BytesIO - -from redis.asyncio.client import Redis -from aiogram import Router, F -from aiogram.fsm.context import FSMContext -from aiogram.types import Message, CallbackQuery, BufferedInputFile -from aiogram.filters import Command, CommandStart, StateFilter -from aiogram_dialog import DialogManager, StartMode -from fluentogram import TranslatorHub - -from database.repositories.user import UserRepository -from database.repositories.utils import UtilsRepository -from database.models import User -import bot.keyboards.inline as inline_kb -from bot.states.states import Menu, Settings, Knowledge, Wallet, Input, Balance -from bot.utils.send_answer import process_after_photo, process_after_text -from bot.utils.funcs_gpt import transcribe_audio, add_file_to_memory -from config import TYPE_USAGE, ADMIN_ID, ADMINS_LIST -from bot.utils.check_payment import check_payment_sol, check_payment_ton - -router = Router() - - -DICT_FORMATS = { - "doc": "application/msword", - "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - "md": "text/markdown", - "pdf": "application/pdf", - "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", - 'txt': 'text/plain', - 'py': 'text/x-python' -} - - -@router.message(CommandStart()) -async def start(message: Message, user_repo: UserRepository, state: FSMContext, user: User, i18n): - await state.clear() - await message.answer(text=i18n.get('start_text'), - reply_markup=inline_kb.close_text(i18n.get('close_kb'))) - - -@router.callback_query(F.data.startswith('select_language_')) -async def select_language(callback: CallbackQuery, user_repo: UserRepository, user: User, i18n, _translator_hub: TranslatorHub): - lang = callback.data.split('_')[2] - 
translator = _translator_hub.get_translator_by_locale(lang) - - await user_repo.update(user, language=lang) - - await callback.message.edit_text(text=translator.get('start_text'), - reply_markup=inline_kb.close_text(translator.get('close_kb'))) - -@router.message(Command('help')) -async def cmd_help(message: Message, state: FSMContext, i18n): - await state.clear() - await message.answer(text=i18n.get('cmd_help_text'), reply_markup=inline_kb.close_text(i18n.get('close_kb'))) - - -@router.callback_query(F.data == 'close') -async def close(callback: CallbackQuery, utils_repo: UtilsRepository, state: FSMContext, i18n): - await state.clear() - await callback.message.delete() - - -@router.message(Command('settings')) -async def cmd_settings(message: Message, dialog_manager: DialogManager, state: FSMContext): - await state.clear() - await dialog_manager.start(state=Settings.main, mode=StartMode.RESET_STACK) - - -@router.message(Command('new')) -async def cmd_new(message: Message, dialog_manager: DialogManager, state: FSMContext): - await state.clear() - await dialog_manager.start(state=Menu.new, mode=StartMode.RESET_STACK) - - -@router.message(Command('save')) -async def cmd_save(message: Message, state: FSMContext, dialog_manager: DialogManager): - await state.clear() - await dialog_manager.start(state=Menu.save, mode=StartMode.RESET_STACK) - - -@router.message(Command('delete')) -async def cmd_delete(message: Message, state: FSMContext, dialog_manager: DialogManager): - await state.clear() - await dialog_manager.start(state=Menu.delete, mode=StartMode.RESET_STACK) - - -@router.message(Command('balance')) -async def cmd_settings(message: Message, dialog_manager: DialogManager, state: FSMContext): - await state.clear() - await dialog_manager.start(state=Balance.main, mode=StartMode.RESET_STACK) - - -@router.message(F.text, StateFilter(None)) -async def text_input(message: Message, user_repo: UserRepository, utils_repo: UtilsRepository, redis: Redis, user: User, i18n, 
mcp_server, scheduler): - if await redis.get(f'request_{message.from_user.id}'): - return - if TYPE_USAGE == 'private': - if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: - return - else: - if user.balance_credits <= 0: - return await message.answer(i18n.get('warning_text_no_credits')) - - await redis.set(f'request_{message.from_user.id}', 't', ex=40) - mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) - task = asyncio.create_task(process_after_text(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, - redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, scheduler=scheduler)) - - -@router.message(F.photo, StateFilter(None)) -async def photo_input(message: Message, user_repo: UserRepository, utils_repo: UserRepository, redis: Redis, user: User, i18n, mcp_server, scheduler): - if await redis.get(f'request_{message.from_user.id}'): - return - if TYPE_USAGE == 'private': - if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: - return - else: - if user.balance_credits <= 0: - return await message.answer(i18n.get('warning_text_no_credits')) - - await redis.set(f'request_{message.from_user.id}', 't', ex=40) - mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) - task = asyncio.create_task(process_after_photo(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, - redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, scheduler=scheduler)) - - -@router.message(F.voice, StateFilter(None)) -async def input_voice(message: Message, user_repo: UserRepository, utils_repo: UserRepository, redis: Redis, user: User, i18n, mcp_server, scheduler): - if await redis.get(f'request_{message.from_user.id}'): - return - if TYPE_USAGE == 'private': - if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: - return - else: - if user.balance_credits <= 0: - return await 
message.answer(i18n.get('warning_text_no_credits')) - - await redis.set(f'request_{message.from_user.id}', 't', ex=40) - mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) - voice_id = message.voice.file_id - file_path = await message.bot.get_file(file_id=voice_id) - file_bytes = (await message.bot.download_file(file_path.file_path)).read() - try: - text_from_voice = await transcribe_audio(bytes_audio=file_bytes) - except Exception as e: - await message.answer(text=i18n.get('warning_text_error')) - await redis.delete(f'request_{message.from_user.id}') - return await mess_to_delete.delete() - - task = asyncio.create_task( - process_after_text(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, - redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, text_from_voice=text_from_voice, mcp_server_1=mcp_server, - scheduler=scheduler)) - - -@router.message(F.document, StateFilter(None)) -async def input_document(message: Message, user_repo: UserRepository, utils_repo: UserRepository, redis: Redis, user: User, i18n, mcp_server, scheduler): - if await redis.get(f'request_{message.from_user.id}'): - return - if TYPE_USAGE == 'private': - if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: - return - else: - if user.balance_credits <= 0: - return await message.answer(i18n.get('warning_text_no_credits')) - - format_doc = message.document.file_name.split('.')[-1] - if format_doc not in DICT_FORMATS: - return await message.answer(i18n.get('warning_text_format')) - - await redis.set(f'request_{message.from_user.id}', 't', ex=40) - mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) - file_id = message.document.file_id - file_path = await message.bot.get_file(file_id=file_id) - file_bytes = (await message.bot.download_file(file_path.file_path)).read() - try: - await add_file_to_memory(user_repo=user_repo, user=user, - file_name=message.document.file_name, file_bytes=file_bytes, - 
mem_type=DICT_FORMATS.get(format_doc)) - task = asyncio.create_task( - process_after_text(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, - redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, - constant_text=i18n.get('text_user_upload_file', filename=message.document.file_name), - scheduler=scheduler) - ) - except Exception as e: - await message.answer(i18n.get('warning_text_error')) - await redis.delete(f'request_{message.from_user.id}') - await mess_to_delete.delete() - - - -@router.callback_query(F.data.startswith('check_payment_')) -async def check_payment(callback: CallbackQuery, user_repo: UserRepository, - utils_repo: UtilsRepository, user: User, solana_client, i18n): - id_payment = int(callback.data.split('_')[-1]) - await callback.answer('') - payment = await utils_repo.get_payment(payment_id=id_payment) - message = await callback.message.answer(text=i18n.get('wait_check_payment_text')) - try: - if payment.crypto_currency == 'SOL': - is_check = await check_payment_sol(amount=payment.crypto_amount, client=solana_client) - else: - is_check = await check_payment_ton(amount=payment.crypto_amount) - - if is_check: - await user_repo.add_user_credits(user_id=user.telegram_id, balance_credits=payment.amount_usd * 1000) - await utils_repo.update_payment_status(payment_id=payment.id, status='confirmed') - await callback.message.delete() - return await message.edit_text(text=i18n.get('check_payment_success_text')) - except Exception as e: - print(e) - pass - - await message.edit_text(text=i18n.get('check_payment_error_text')) - - -@router.callback_query(F.data.startswith('markdown_')) -async def md_answer(callback: CallbackQuery, user_repo: UserRepository, user: User, i18n, bot): - row_id = int(callback.data.split('_')[-1]) - - row = await user_repo.get_row_for_md(row_id=row_id) - if not row: - return await callback.answer(i18n.get('warning_text_no_row_md'), show_alert=True) - - bio = BytesIO() - 
bio.write(row.content.encode("utf-8")) - bio.seek(0) - - await callback.bot.send_document( - chat_id=callback.from_user.id, - document=BufferedInputFile(bio.read(), filename=f'{row.id}.md') - ) - bio.close() - +import asyncio +from io import BytesIO + +from redis.asyncio.client import Redis +from aiogram import Router, F +from aiogram.fsm.context import FSMContext +from aiogram.types import Message, CallbackQuery, BufferedInputFile +from aiogram.filters import Command, CommandStart, StateFilter +from aiogram_dialog import DialogManager, StartMode +from fluentogram import TranslatorHub + +from database.repositories.user import UserRepository +from database.repositories.utils import UtilsRepository +from database.models import User +import bot.keyboards.inline as inline_kb +from bot.states.states import Menu, Settings, Knowledge, Wallet, Input, Balance +from bot.utils.send_answer import process_after_photo, process_after_text +from bot.utils.funcs_gpt import transcribe_audio, add_file_to_memory +from config import TYPE_USAGE, ADMIN_ID, ADMINS_LIST +from bot.utils.check_payment import check_payment_sol, check_payment_ton + +router = Router() + + +DICT_FORMATS = { + "doc": "application/msword", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "md": "text/markdown", + "pdf": "application/pdf", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + 'txt': 'text/plain', + 'py': 'text/x-python' +} + + +@router.message(CommandStart()) +async def start(message: Message, user_repo: UserRepository, state: FSMContext, user: User, i18n): + await state.clear() + await message.answer(text=i18n.get('start_text'), + reply_markup=inline_kb.close_text(i18n.get('close_kb'))) + + +@router.callback_query(F.data.startswith('select_language_')) +async def select_language(callback: CallbackQuery, user_repo: UserRepository, user: User, i18n, _translator_hub: TranslatorHub): + lang = callback.data.split('_')[2] + 
translator = _translator_hub.get_translator_by_locale(lang) + + await user_repo.update(user, language=lang) + + await callback.message.edit_text(text=translator.get('start_text'), + reply_markup=inline_kb.close_text(translator.get('close_kb'))) + +@router.message(Command('help')) +async def cmd_help(message: Message, state: FSMContext, i18n): + await state.clear() + await message.answer(text=i18n.get('cmd_help_text'), reply_markup=inline_kb.close_text(i18n.get('close_kb'))) + + +@router.callback_query(F.data == 'close') +async def close(callback: CallbackQuery, utils_repo: UtilsRepository, state: FSMContext, i18n): + await state.clear() + await callback.message.delete() + + +@router.message(Command('settings')) +async def cmd_settings(message: Message, dialog_manager: DialogManager, state: FSMContext): + await state.clear() + await dialog_manager.start(state=Settings.main, mode=StartMode.RESET_STACK) + + +@router.message(Command('new')) +async def cmd_new(message: Message, dialog_manager: DialogManager, state: FSMContext): + await state.clear() + await dialog_manager.start(state=Menu.new, mode=StartMode.RESET_STACK) + + +@router.message(Command('save')) +async def cmd_save(message: Message, state: FSMContext, dialog_manager: DialogManager): + await state.clear() + await dialog_manager.start(state=Menu.save, mode=StartMode.RESET_STACK) + + +@router.message(Command('delete')) +async def cmd_delete(message: Message, state: FSMContext, dialog_manager: DialogManager): + await state.clear() + await dialog_manager.start(state=Menu.delete, mode=StartMode.RESET_STACK) + + +@router.message(Command('balance')) +async def cmd_settings(message: Message, dialog_manager: DialogManager, state: FSMContext): + await state.clear() + await dialog_manager.start(state=Balance.main, mode=StartMode.RESET_STACK) + + +@router.message(F.text, StateFilter(None)) +async def text_input(message: Message, user_repo: UserRepository, utils_repo: UtilsRepository, redis: Redis, user: User, i18n, 
mcp_server, scheduler): + if await redis.get(f'request_{message.from_user.id}'): + return + if TYPE_USAGE == 'private': + if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: + return + else: + if user.balance_credits <= 0: + return await message.answer(i18n.get('warning_text_no_credits')) + + await redis.set(f'request_{message.from_user.id}', 't', ex=40) + mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) + task = asyncio.create_task(process_after_text(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, + redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, scheduler=scheduler)) + + +@router.message(F.photo, StateFilter(None)) +async def photo_input(message: Message, user_repo: UserRepository, utils_repo: UserRepository, redis: Redis, user: User, i18n, mcp_server, scheduler): + if await redis.get(f'request_{message.from_user.id}'): + return + if TYPE_USAGE == 'private': + if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: + return + else: + if user.balance_credits <= 0: + return await message.answer(i18n.get('warning_text_no_credits')) + + await redis.set(f'request_{message.from_user.id}', 't', ex=40) + mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) + task = asyncio.create_task(process_after_photo(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, + redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, scheduler=scheduler)) + + +@router.message(F.voice, StateFilter(None)) +async def input_voice(message: Message, user_repo: UserRepository, utils_repo: UserRepository, redis: Redis, user: User, i18n, mcp_server, scheduler): + if await redis.get(f'request_{message.from_user.id}'): + return + if TYPE_USAGE == 'private': + if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: + return + else: + if user.balance_credits <= 0: + return await 
message.answer(i18n.get('warning_text_no_credits')) + + await redis.set(f'request_{message.from_user.id}', 't', ex=40) + mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) + voice_id = message.voice.file_id + file_path = await message.bot.get_file(file_id=voice_id) + file_bytes = (await message.bot.download_file(file_path.file_path)).read() + try: + text_from_voice = await transcribe_audio(bytes_audio=file_bytes) + except Exception as e: + await message.answer(text=i18n.get('warning_text_error')) + await redis.delete(f'request_{message.from_user.id}') + return await mess_to_delete.delete() + + task = asyncio.create_task( + process_after_text(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, + redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, text_from_voice=text_from_voice, mcp_server_1=mcp_server, + scheduler=scheduler)) + + +@router.message(F.document, StateFilter(None)) +async def input_document(message: Message, user_repo: UserRepository, utils_repo: UserRepository, redis: Redis, user: User, i18n, mcp_server, scheduler): + if await redis.get(f'request_{message.from_user.id}'): + return + if TYPE_USAGE == 'private': + if message.from_user.id != ADMIN_ID and message.from_user.id not in ADMINS_LIST: + return + else: + if user.balance_credits <= 0: + return await message.answer(i18n.get('warning_text_no_credits')) + + format_doc = message.document.file_name.split('.')[-1] + if format_doc not in DICT_FORMATS: + return await message.answer(i18n.get('warning_text_format')) + + await redis.set(f'request_{message.from_user.id}', 't', ex=40) + mess_to_delete = await message.answer(text=i18n.get('wait_answer_text')) + file_id = message.document.file_id + file_path = await message.bot.get_file(file_id=file_id) + file_bytes = (await message.bot.download_file(file_path.file_path)).read() + try: + await add_file_to_memory(user_repo=user_repo, user=user, + file_name=message.document.file_name, file_bytes=file_bytes, + 
mem_type=DICT_FORMATS.get(format_doc)) + task = asyncio.create_task( + process_after_text(message=message, user=user, user_repo=user_repo, utils_repo=utils_repo, + redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, + constant_text=i18n.get('text_user_upload_file', filename=message.document.file_name), + scheduler=scheduler) + ) + except Exception as e: + await message.answer(i18n.get('warning_text_error')) + await redis.delete(f'request_{message.from_user.id}') + await mess_to_delete.delete() + + + +@router.callback_query(F.data.startswith('check_payment_')) +async def check_payment(callback: CallbackQuery, user_repo: UserRepository, + utils_repo: UtilsRepository, user: User, solana_client, i18n): + id_payment = int(callback.data.split('_')[-1]) + await callback.answer('') + payment = await utils_repo.get_payment(payment_id=id_payment) + message = await callback.message.answer(text=i18n.get('wait_check_payment_text')) + try: + if payment.crypto_currency == 'SOL': + is_check = await check_payment_sol(amount=payment.crypto_amount, client=solana_client) + else: + is_check = await check_payment_ton(amount=payment.crypto_amount) + + if is_check: + await user_repo.add_user_credits(user_id=user.telegram_id, balance_credits=payment.amount_usd * 1000) + await utils_repo.update_payment_status(payment_id=payment.id, status='confirmed') + await callback.message.delete() + return await message.edit_text(text=i18n.get('check_payment_success_text')) + except Exception as e: + print(e) + pass + + await message.edit_text(text=i18n.get('check_payment_error_text')) + + +@router.callback_query(F.data.startswith('markdown_')) +async def md_answer(callback: CallbackQuery, user_repo: UserRepository, user: User, i18n, bot): + row_id = int(callback.data.split('_')[-1]) + + row = await user_repo.get_row_for_md(row_id=row_id) + if not row: + return await callback.answer(i18n.get('warning_text_no_row_md'), show_alert=True) + + bio = BytesIO() + 
bio.write(row.content.encode("utf-8")) + bio.seek(0) + + await callback.bot.send_document( + chat_id=callback.from_user.id, + document=BufferedInputFile(bio.read(), filename=f'{row.id}.md') + ) + bio.close() + diff --git a/bot/scheduler_funcs/daily_tokens.py b/bot/scheduler_funcs/daily_tokens.py index 7c94585..fe9b6cf 100644 --- a/bot/scheduler_funcs/daily_tokens.py +++ b/bot/scheduler_funcs/daily_tokens.py @@ -1,11 +1,11 @@ -from database.repositories.utils import UtilsRepository -from database.models import async_session -from config import TYPE_USAGE - - -async def add_daily_tokens(): - if TYPE_USAGE != 'private': - async with async_session() as session_: - utils_repo = UtilsRepository(session_) - await utils_repo.update_tokens_daily() - +from database.repositories.utils import UtilsRepository +from database.models import async_session +from config import TYPE_USAGE + + +async def add_daily_tokens(): + if TYPE_USAGE != 'private': + async with async_session() as session_: + utils_repo = UtilsRepository(session_) + await utils_repo.update_tokens_daily() + diff --git a/bot/states/states.py b/bot/states/states.py index 2469af4..99a1c80 100644 --- a/bot/states/states.py +++ b/bot/states/states.py @@ -1,39 +1,39 @@ -from aiogram.fsm.state import StatesGroup, State - - -class Menu(StatesGroup): - new = State() - save = State() - delete = State() - - -class Settings(StatesGroup): - main = State() - language = State() - - -class Knowledge(StatesGroup): - main = State() - add = State() - add_not_format = State() - add_approve = State() - delete = State() - delete_approve = State() - - -class Wallet(StatesGroup): - main = State() - balance = State() - delete = State() - balance_after_check = State() - add_not_format = State() - -class Input(StatesGroup): - main = State() - - -class Balance(StatesGroup): - main = State() - choose = State() - input = State() +from aiogram.fsm.state import StatesGroup, State + + +class Menu(StatesGroup): + new = State() + save = State() + 
delete = State() + + +class Settings(StatesGroup): + main = State() + language = State() + + +class Knowledge(StatesGroup): + main = State() + add = State() + add_not_format = State() + add_approve = State() + delete = State() + delete_approve = State() + + +class Wallet(StatesGroup): + main = State() + balance = State() + delete = State() + balance_after_check = State() + add_not_format = State() + +class Input(StatesGroup): + main = State() + + +class Balance(StatesGroup): + main = State() + choose = State() + input = State() input_not_format = State() \ No newline at end of file diff --git a/bot/utils/agent_requests.py b/bot/utils/agent_requests.py index 1ecf5b2..c786e8d 100644 --- a/bot/utils/agent_requests.py +++ b/bot/utils/agent_requests.py @@ -1,177 +1,177 @@ -import base64, json, uuid -import os -from io import BytesIO -from typing import Optional - -import aiofiles -from agents.mcp import MCPServerStdio -from aiogram import Bot -from aiogram.types import BufferedInputFile -from redis.asyncio.client import Redis -from agents import Runner, RunConfig -from dataclasses import dataclass - -from bot.agents_tools.agents_ import client, create_main_agent, memory_creator_agent -from database.models import User -from database.repositories.user import UserRepository -from database.repositories.utils import UtilsRepository -from config import ADMIN_ID - - -@dataclass -class AnswerText: - answer: str - image_bytes: Optional[bytes] - input_tokens: int - input_tokens_image: int - output_tokens: int - output_tokens_image: int - -@dataclass -class AnswerImage: - answer: str - input_tokens: int - output_tokens: int - image_path: str - - -async def return_vectors(user_id: int, user_repo: UserRepository, utils_repo: UtilsRepository): - memory_vector = await user_repo.get_memory_vector(user_id=user_id) - if not memory_vector: - vector_store = await client.vector_stores.create(name=f"user_memory_{user_id}") - await user_repo.add_memory_vector(user_id=user_id, 
vector_store_id=vector_store.id) - vector_store_id = vector_store.id - else: - vector_store_id = memory_vector.id_vector - - knowledge_vector = await utils_repo.get_knowledge_vectore_store_id() - if not knowledge_vector: - vector_store = await client.vector_stores.create(name="knowledge_base") - await utils_repo.add_knowledge_vectore_store_id(vector_store.id) - knowledge_id = vector_store.id - else: - knowledge_id = knowledge_vector.id_vector - - return vector_store_id, knowledge_id - - -async def encode_image(image_path): - async with aiofiles.open(image_path, "rb") as image_file: - return base64.b64encode(await image_file.read()).decode("utf-8") - - -async def text_request(text: str, user: User, user_repo: UserRepository, utils_repo: UtilsRepository, - redis: Redis, mcp_server_1: MCPServerStdio, bot: Bot, scheduler): - vector_store_id, knowledge_id = await return_vectors(user_id=user.telegram_id, user_repo=user_repo, utils_repo=utils_repo) - messages = await user_repo.get_messags(user_id=user.telegram_id) - user_wallet = await user_repo.get_wallet(user_id=user.telegram_id) - - runner = await Runner.run( - starting_agent=await create_main_agent(user_memory_id=vector_store_id, knowledge_id=knowledge_id, - mcp_server_1=mcp_server_1, user_id=user.telegram_id, - private_key=user_wallet), - input=[{'role': message.role, - 'content': message.content if f'image_{user.telegram_id}' not in message.content - else [{"type": "input_text", "text": message.content.split('|')[-1]}, - { - "type": "input_image", - "image_url": f"data:image/jpeg;base64,{await encode_image(message.content.split('|')[0])}", - }]} - for message in messages] + [{'role': 'user', 'content': text}], - - context=(client, user.telegram_id, user_repo, scheduler), - run_config=RunConfig( - tracing_disabled=False - ) - ) - - input_tokens = 0 - output_tokens = 0 - for response in runner.raw_responses: - input_tokens += response.usage.input_tokens - output_tokens += response.usage.output_tokens - - # await 
send_raw_response(bot, str(runner.raw_responses)) - - answer = runner.final_output - is_image_answer = await redis.get(f'image_{user.telegram_id}') - if is_image_answer: - image_answer = json.loads(is_image_answer) - await redis.delete(f'image_{user.telegram_id}') - image_path = image_answer['image'] - input_tokens_image = image_answer['input_tokens'] - output_tokens_image = image_answer['output_tokens'] - # await bot.send_message(chat_id=ADMIN_ID, text=f"Image Request\n\n" - # f"Input tokens: {input_tokens_image}\n" - # f"Output tokens: {output_tokens_image}\n") - - async with aiofiles.open(image_path, "rb") as image_file: - image_bytes = await image_file.read() - os.remove(image_path) - return AnswerText(answer=answer, image_bytes=image_bytes, input_tokens=input_tokens, - input_tokens_image=input_tokens_image, output_tokens=output_tokens, output_tokens_image=output_tokens_image) - - return AnswerText(answer=answer, image_bytes=None, input_tokens=input_tokens, - input_tokens_image=0, output_tokens=output_tokens, output_tokens_image=0) - - -async def image_request(image_bytes: bytes, user: User, user_repo: UserRepository, - utils_repo: UtilsRepository, redis: Redis, mcp_server_1: MCPServerStdio, bot: Bot, - scheduler, caption: str = None): - - vector_store_id, knowledge_id = await return_vectors(user_id=user.telegram_id, user_repo=user_repo, utils_repo=utils_repo) - messages = await user_repo.get_messags(user_id=user.telegram_id) - user_wallet = await user_repo.get_wallet(user_id=user.telegram_id) - - id_image = uuid.uuid4() - async with aiofiles.open(f"images/image_{user.telegram_id}_{id_image}.jpeg", "wb") as image_file: - await image_file.write(image_bytes) - - runner = await Runner.run( - starting_agent=await create_main_agent(user_memory_id=vector_store_id, knowledge_id=knowledge_id, - mcp_server_1=mcp_server_1, user_id=user.telegram_id, - private_key=user_wallet), - input=[{'role': message.role, - 'content': message.content if f'image_{user.telegram_id}' not 
in message.content - else [{"type": "input_text", "text": message.content.split('|')[-1]}, - { - "type": "input_image", - "image_url": f"data:image/jpeg;base64,{await encode_image(message.content.split('|')[0])}", - }]} - for message in messages] + [{'role': 'user', 'content': [{"type": "input_text", - "text": f"{caption if caption else '.'}"}, - { - "type": "input_image", - "image_url": f"data:image/jpeg;base64,{base64.b64encode(image_bytes).decode('utf-8')}", - }]}], - - context=(client, user.telegram_id, user_repo, scheduler), - run_config=RunConfig( - tracing_disabled=False - ) - ) - - # await send_raw_response(bot, str(runner.raw_responses)) - - input_tokens = 0 - output_tokens = 0 - for response in runner.raw_responses: - input_tokens += response.usage.input_tokens - output_tokens += response.usage.output_tokens - - answer = runner.final_output - - return AnswerImage(answer=answer, input_tokens=input_tokens, - output_tokens=output_tokens, image_path=f'images/image_{user.telegram_id}_{id_image}.jpeg') - - -async def send_raw_response(bot: Bot, raw_response: str): - bio = BytesIO() - bio.write(raw_response.encode("utf-8")) - bio.seek(0) - - await bot.send_document( - chat_id=ADMIN_ID, - document=BufferedInputFile(bio.read(), filename='raw_response.txt') - ) +import base64, json, uuid +import os +from io import BytesIO +from typing import Optional + +import aiofiles +from agents.mcp import MCPServerStdio +from aiogram import Bot +from aiogram.types import BufferedInputFile +from redis.asyncio.client import Redis +from agents import Runner, RunConfig +from dataclasses import dataclass + +from bot.agents_tools.agents_ import client, create_main_agent, memory_creator_agent +from database.models import User +from database.repositories.user import UserRepository +from database.repositories.utils import UtilsRepository +from config import ADMIN_ID + + +@dataclass +class AnswerText: + answer: str + image_bytes: Optional[bytes] + input_tokens: int + input_tokens_image: 
int + output_tokens: int + output_tokens_image: int + +@dataclass +class AnswerImage: + answer: str + input_tokens: int + output_tokens: int + image_path: str + + +async def return_vectors(user_id: int, user_repo: UserRepository, utils_repo: UtilsRepository): + memory_vector = await user_repo.get_memory_vector(user_id=user_id) + if not memory_vector: + vector_store = await client.vector_stores.create(name=f"user_memory_{user_id}") + await user_repo.add_memory_vector(user_id=user_id, vector_store_id=vector_store.id) + vector_store_id = vector_store.id + else: + vector_store_id = memory_vector.id_vector + + knowledge_vector = await utils_repo.get_knowledge_vectore_store_id() + if not knowledge_vector: + vector_store = await client.vector_stores.create(name="knowledge_base") + await utils_repo.add_knowledge_vectore_store_id(vector_store.id) + knowledge_id = vector_store.id + else: + knowledge_id = knowledge_vector.id_vector + + return vector_store_id, knowledge_id + + +async def encode_image(image_path): + async with aiofiles.open(image_path, "rb") as image_file: + return base64.b64encode(await image_file.read()).decode("utf-8") + + +async def text_request(text: str, user: User, user_repo: UserRepository, utils_repo: UtilsRepository, + redis: Redis, mcp_server_1: MCPServerStdio, bot: Bot, scheduler): + vector_store_id, knowledge_id = await return_vectors(user_id=user.telegram_id, user_repo=user_repo, utils_repo=utils_repo) + messages = await user_repo.get_messags(user_id=user.telegram_id) + user_wallet = await user_repo.get_wallet(user_id=user.telegram_id) + + runner = await Runner.run( + starting_agent=await create_main_agent(user_memory_id=vector_store_id, knowledge_id=knowledge_id, + mcp_server_1=mcp_server_1, user_id=user.telegram_id, + private_key=user_wallet), + input=[{'role': message.role, + 'content': message.content if f'image_{user.telegram_id}' not in message.content + else [{"type": "input_text", "text": message.content.split('|')[-1]}, + { + "type": 
"input_image", + "image_url": f"data:image/jpeg;base64,{await encode_image(message.content.split('|')[0])}", + }]} + for message in messages] + [{'role': 'user', 'content': text}], + + context=(client, user.telegram_id, user_repo, scheduler), + run_config=RunConfig( + tracing_disabled=False + ) + ) + + input_tokens = 0 + output_tokens = 0 + for response in runner.raw_responses: + input_tokens += response.usage.input_tokens + output_tokens += response.usage.output_tokens + + # await send_raw_response(bot, str(runner.raw_responses)) + + answer = runner.final_output + is_image_answer = await redis.get(f'image_{user.telegram_id}') + if is_image_answer: + image_answer = json.loads(is_image_answer) + await redis.delete(f'image_{user.telegram_id}') + image_path = image_answer['image'] + input_tokens_image = image_answer['input_tokens'] + output_tokens_image = image_answer['output_tokens'] + # await bot.send_message(chat_id=ADMIN_ID, text=f"Image Request\n\n" + # f"Input tokens: {input_tokens_image}\n" + # f"Output tokens: {output_tokens_image}\n") + + async with aiofiles.open(image_path, "rb") as image_file: + image_bytes = await image_file.read() + os.remove(image_path) + return AnswerText(answer=answer, image_bytes=image_bytes, input_tokens=input_tokens, + input_tokens_image=input_tokens_image, output_tokens=output_tokens, output_tokens_image=output_tokens_image) + + return AnswerText(answer=answer, image_bytes=None, input_tokens=input_tokens, + input_tokens_image=0, output_tokens=output_tokens, output_tokens_image=0) + + +async def image_request(image_bytes: bytes, user: User, user_repo: UserRepository, + utils_repo: UtilsRepository, redis: Redis, mcp_server_1: MCPServerStdio, bot: Bot, + scheduler, caption: str = None): + + vector_store_id, knowledge_id = await return_vectors(user_id=user.telegram_id, user_repo=user_repo, utils_repo=utils_repo) + messages = await user_repo.get_messags(user_id=user.telegram_id) + user_wallet = await 
user_repo.get_wallet(user_id=user.telegram_id) + + id_image = uuid.uuid4() + async with aiofiles.open(f"images/image_{user.telegram_id}_{id_image}.jpeg", "wb") as image_file: + await image_file.write(image_bytes) + + runner = await Runner.run( + starting_agent=await create_main_agent(user_memory_id=vector_store_id, knowledge_id=knowledge_id, + mcp_server_1=mcp_server_1, user_id=user.telegram_id, + private_key=user_wallet), + input=[{'role': message.role, + 'content': message.content if f'image_{user.telegram_id}' not in message.content + else [{"type": "input_text", "text": message.content.split('|')[-1]}, + { + "type": "input_image", + "image_url": f"data:image/jpeg;base64,{await encode_image(message.content.split('|')[0])}", + }]} + for message in messages] + [{'role': 'user', 'content': [{"type": "input_text", + "text": f"{caption if caption else '.'}"}, + { + "type": "input_image", + "image_url": f"data:image/jpeg;base64,{base64.b64encode(image_bytes).decode('utf-8')}", + }]}], + + context=(client, user.telegram_id, user_repo, scheduler), + run_config=RunConfig( + tracing_disabled=False + ) + ) + + # await send_raw_response(bot, str(runner.raw_responses)) + + input_tokens = 0 + output_tokens = 0 + for response in runner.raw_responses: + input_tokens += response.usage.input_tokens + output_tokens += response.usage.output_tokens + + answer = runner.final_output + + return AnswerImage(answer=answer, input_tokens=input_tokens, + output_tokens=output_tokens, image_path=f'images/image_{user.telegram_id}_{id_image}.jpeg') + + +async def send_raw_response(bot: Bot, raw_response: str): + bio = BytesIO() + bio.write(raw_response.encode("utf-8")) + bio.seek(0) + + await bot.send_document( + chat_id=ADMIN_ID, + document=BufferedInputFile(bio.read(), filename='raw_response.txt') + ) bio.close() \ No newline at end of file diff --git a/bot/utils/calculate_tokens.py b/bot/utils/calculate_tokens.py index fcbedcd..12ad99a 100644 --- a/bot/utils/calculate_tokens.py +++ 
b/bot/utils/calculate_tokens.py @@ -1,18 +1,18 @@ -from database.models import User -from database.repositories.user import UserRepository - -from config import TYPE_USAGE, CREDITS_INPUT_TEXT, CREDITS_OUTPUT_TEXT, CREDITS_INPUT_IMAGE, CREDITS_OUTPUT_IMAGE - - -async def calculate_tokens(user_repo: UserRepository, user: User, - input_tokens_text: int, output_tokens_text: int, - input_tokens_img: int, output_tokens_img: int): - if TYPE_USAGE != 'private': - credits_input_text = (input_tokens_text / 1000) * CREDITS_INPUT_TEXT - credits_output_text = (output_tokens_text / 1000) * CREDITS_OUTPUT_TEXT - - credits_input_img = (input_tokens_img / 1000) * CREDITS_INPUT_IMAGE - credits_output_img = (output_tokens_img / 1000) * CREDITS_OUTPUT_IMAGE - - credits = credits_input_text + credits_output_text + credits_input_img + credits_output_img +from database.models import User +from database.repositories.user import UserRepository + +from config import TYPE_USAGE, CREDITS_INPUT_TEXT, CREDITS_OUTPUT_TEXT, CREDITS_INPUT_IMAGE, CREDITS_OUTPUT_IMAGE + + +async def calculate_tokens(user_repo: UserRepository, user: User, + input_tokens_text: int, output_tokens_text: int, + input_tokens_img: int, output_tokens_img: int): + if TYPE_USAGE != 'private': + credits_input_text = (input_tokens_text / 1000) * CREDITS_INPUT_TEXT + credits_output_text = (output_tokens_text / 1000) * CREDITS_OUTPUT_TEXT + + credits_input_img = (input_tokens_img / 1000) * CREDITS_INPUT_IMAGE + credits_output_img = (output_tokens_img / 1000) * CREDITS_OUTPUT_IMAGE + + credits = credits_input_text + credits_output_text + credits_input_img + credits_output_img await user_repo.update(user, balance_credits=credits) \ No newline at end of file diff --git a/bot/utils/check_burn_address.py b/bot/utils/check_burn_address.py index ca17aff..29a6585 100644 --- a/bot/utils/check_burn_address.py +++ b/bot/utils/check_burn_address.py @@ -1,52 +1,52 @@ -import os -import sys - -from dotenv import load_dotenv -from aiohttp 
import ClientSession, ClientTimeout -from aiogram import Bot - -from config import TYPE_USAGE, ADMIN_ID, HOST_ADDRESS - -load_dotenv() - - -async def add_burn_address(bot: Bot): - if TYPE_USAGE == 'pay': - if (not os.getenv('TOKEN_BURN_ADDRESS')) or (not ADMIN_ID): - await bot.send_message(chat_id=ADMIN_ID, - text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') - sys.exit(1) - - async with ClientSession(timeout=ClientTimeout(60)) as session: - url = f"{HOST_ADDRESS}/create_payment_module" - json = { - "token_burn_address": os.getenv('TOKEN_BURN_ADDRESS'), - "user_id": ADMIN_ID - } - try: - async with session.post(url, json=json, ssl=False) as response: - data = await response.json() - if data['status'] == 'error': - await bot.send_message(chat_id=ADMIN_ID, - text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') - sys.exit(1) - - except Exception as e: - await bot.send_message(chat_id=ADMIN_ID, - text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') - sys.exit(1) - - try: - url = f'{HOST_ADDRESS}/check_balance' - async with session.post(url, json=json, ssl=False) as response: - data = await response.json() - if data['status'] == 'error': - await bot.send_message(chat_id=ADMIN_ID, - text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') - sys.exit(1) - - except Exception as e: - await bot.send_message(chat_id=ADMIN_ID, - text='The bot is not running! 
To activate the "pay"" mode, you must pass a check, see the documentation for details!') - sys.exit(1) - +import os +import sys + +from dotenv import load_dotenv +from aiohttp import ClientSession, ClientTimeout +from aiogram import Bot + +from config import TYPE_USAGE, ADMIN_ID, HOST_ADDRESS + +load_dotenv() + + +async def add_burn_address(bot: Bot): + if TYPE_USAGE == 'pay': + if (not os.getenv('TOKEN_BURN_ADDRESS')) or (not ADMIN_ID): + await bot.send_message(chat_id=ADMIN_ID, + text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') + sys.exit(1) + + async with ClientSession(timeout=ClientTimeout(60)) as session: + url = f"{HOST_ADDRESS}/create_payment_module" + json = { + "token_burn_address": os.getenv('TOKEN_BURN_ADDRESS'), + "user_id": ADMIN_ID + } + try: + async with session.post(url, json=json, ssl=False) as response: + data = await response.json() + if data['status'] == 'error': + await bot.send_message(chat_id=ADMIN_ID, + text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') + sys.exit(1) + + except Exception as e: + await bot.send_message(chat_id=ADMIN_ID, + text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') + sys.exit(1) + + try: + url = f'{HOST_ADDRESS}/check_balance' + async with session.post(url, json=json, ssl=False) as response: + data = await response.json() + if data['status'] == 'error': + await bot.send_message(chat_id=ADMIN_ID, + text='The bot is not running! To activate the "pay"" mode, you must pass a check, see the documentation for details!') + sys.exit(1) + + except Exception as e: + await bot.send_message(chat_id=ADMIN_ID, + text='The bot is not running! 
To activate the "pay"" mode, you must pass a check, see the documentation for details!') + sys.exit(1) + diff --git a/bot/utils/check_payment.py b/bot/utils/check_payment.py index 8846b4d..a1ba318 100644 --- a/bot/utils/check_payment.py +++ b/bot/utils/check_payment.py @@ -1,62 +1,62 @@ -import asyncio -import json, os -from decimal import getcontext, Decimal - -from dotenv import load_dotenv -from pytonapi import AsyncTonapi -from solana.rpc.async_api import AsyncClient -from solana.exceptions import SolanaRpcException -from solana.rpc.types import Pubkey -from spl.token.instructions import get_associated_token_address - - -load_dotenv() - -tonapi = AsyncTonapi(api_key=os.getenv('API_KEY_TON')) - - -async def check_payment_ton(amount: str): - getcontext().prec = 18 - your_amount_dec = Decimal(amount) - your_amount_nano = int((your_amount_dec * Decimal(10 ** 9)).to_integral_value()) - - transactions = await tonapi.accounts.get_events(account_id=os.getenv('TON_ADDRESS'), limit=15) - for tx in transactions.events: - if tx.actions[0].TonTransfer is None: - continue - if tx.actions[0].TonTransfer.amount == your_amount_nano: - return True - - -async def check_payment_sol(amount: str, client: AsyncClient): - ata = get_associated_token_address(mint=Pubkey.from_string(os.getenv('MINT_TOKEN_ADDRESS')), owner=Pubkey.from_string(os.getenv('ADDRESS_SOL'))) - - getcontext().prec = 18 - your_amount_dec = Decimal(amount) - - bal_info = await client.get_token_account_balance(ata, commitment="confirmed") - decimals = bal_info.value.decimals - your_amount_nano = int((your_amount_dec * Decimal(10 ** decimals)).to_integral_value()) - - sigs = await client.get_signatures_for_address(ata, limit=10) - for sig in sigs.value: - await asyncio.sleep(0.5) - while True: - try: - transaction = await client.get_transaction(sig.signature, encoding="jsonParsed", - max_supported_transaction_version=0) - instructions = transaction.value.transaction.transaction.message.instructions - for index, instr 
in enumerate(instructions): - data_instr = json.loads(instr.to_json()) - if data_instr.get("program") != "spl-token": - continue - if data_instr['parsed']['info']['destination'] == str(ata) and \ - data_instr['parsed']['info']['tokenAmount']['amount'] == str(your_amount_nano): - return True - break - except SolanaRpcException as e: - await asyncio.sleep(5) - except Exception as e: - return False - return False - +import asyncio +import json, os +from decimal import getcontext, Decimal + +from dotenv import load_dotenv +from pytonapi import AsyncTonapi +from solana.rpc.async_api import AsyncClient +from solana.exceptions import SolanaRpcException +from solana.rpc.types import Pubkey +from spl.token.instructions import get_associated_token_address + + +load_dotenv() + +tonapi = AsyncTonapi(api_key=os.getenv('API_KEY_TON')) + + +async def check_payment_ton(amount: str): + getcontext().prec = 18 + your_amount_dec = Decimal(amount) + your_amount_nano = int((your_amount_dec * Decimal(10 ** 9)).to_integral_value()) + + transactions = await tonapi.accounts.get_events(account_id=os.getenv('TON_ADDRESS'), limit=15) + for tx in transactions.events: + if tx.actions[0].TonTransfer is None: + continue + if tx.actions[0].TonTransfer.amount == your_amount_nano: + return True + + +async def check_payment_sol(amount: str, client: AsyncClient): + ata = get_associated_token_address(mint=Pubkey.from_string(os.getenv('MINT_TOKEN_ADDRESS')), owner=Pubkey.from_string(os.getenv('ADDRESS_SOL'))) + + getcontext().prec = 18 + your_amount_dec = Decimal(amount) + + bal_info = await client.get_token_account_balance(ata, commitment="confirmed") + decimals = bal_info.value.decimals + your_amount_nano = int((your_amount_dec * Decimal(10 ** decimals)).to_integral_value()) + + sigs = await client.get_signatures_for_address(ata, limit=10) + for sig in sigs.value: + await asyncio.sleep(0.5) + while True: + try: + transaction = await client.get_transaction(sig.signature, encoding="jsonParsed", + 
max_supported_transaction_version=0) + instructions = transaction.value.transaction.transaction.message.instructions + for index, instr in enumerate(instructions): + data_instr = json.loads(instr.to_json()) + if data_instr.get("program") != "spl-token": + continue + if data_instr['parsed']['info']['destination'] == str(ata) and \ + data_instr['parsed']['info']['tokenAmount']['amount'] == str(your_amount_nano): + return True + break + except SolanaRpcException as e: + await asyncio.sleep(5) + except Exception as e: + return False + return False + diff --git a/bot/utils/create_bot.py b/bot/utils/create_bot.py index 024849d..26a6bad 100644 --- a/bot/utils/create_bot.py +++ b/bot/utils/create_bot.py @@ -1,15 +1,15 @@ -import os - -from aiogram import Bot -from aiogram.client.default import DefaultBotProperties - - -def get_bot(token: str) -> Bot: - bot = Bot(token=token, default=DefaultBotProperties(parse_mode='HTML', - link_preview_is_disabled=True - ) - ) - return bot - - +import os + +from aiogram import Bot +from aiogram.client.default import DefaultBotProperties + + +def get_bot(token: str) -> Bot: + bot = Bot(token=token, default=DefaultBotProperties(parse_mode='HTML', + link_preview_is_disabled=True + ) + ) + return bot + + bot = get_bot(token=os.getenv('TELEGRAM_BOT_TOKEN')) \ No newline at end of file diff --git a/bot/utils/executed_tasks.py b/bot/utils/executed_tasks.py index d75438d..18efdf5 100644 --- a/bot/utils/executed_tasks.py +++ b/bot/utils/executed_tasks.py @@ -1,44 +1,44 @@ -import asyncio -from datetime import datetime - -from agents import set_tracing_disabled - -from bot.utils.create_bot import bot -from database.models import async_session -from database.repositories.user import UserRepository -from database.repositories.utils import UtilsRepository -from redis_service.connect import redis -from I18N.factory import i18n_factory -from bot.agents_tools.mcp_servers import get_dexpapirka_server -from bot.utils.scheduler_provider import get_scheduler 
- -set_tracing_disabled(False) -CONCURRENCY_LIMIT = 10 -sem = asyncio.Semaphore(CONCURRENCY_LIMIT) - -translator_hub = i18n_factory() - - -async def execute_task(user_id: int, task_id: int): - from bot.utils.send_answer import process_after_text - - scheduler = get_scheduler() - async with sem: - async with async_session() as session: - user_repository = UserRepository(session) - utils_repo = UtilsRepository(session) - user = await user_repository.get_by_telegram_id(user_id) - user_task = await user_repository.get_task(user_id=user_id, task_id=task_id) - i18n = translator_hub.get_translator_by_locale(user.language) - mess_to_delete = await bot.send_message(chat_id=user_id, text=i18n.get('wait_answer_text_scheduler')) - mcp_server = await get_dexpapirka_server() - - await process_after_text(message=mess_to_delete, user=user, user_repo=user_repository, utils_repo=utils_repo, - redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, - constant_text=f' {user_task.agent_message}', - scheduler=scheduler) - if user_task.schedule_type == 'once': - await user_repository.update_task(user_id=user_id, task_id=task_id, last_executed=datetime.now(), - is_active=False) - else: - await user_repository.update_task(user_id=user_id, task_id=task_id, last_executed=datetime.now()) +import asyncio +from datetime import datetime + +from agents import set_tracing_disabled + +from bot.utils.create_bot import bot +from database.models import async_session +from database.repositories.user import UserRepository +from database.repositories.utils import UtilsRepository +from redis_service.connect import redis +from I18N.factory import i18n_factory +from bot.agents_tools.mcp_servers import get_dexpapirka_server +from bot.utils.scheduler_provider import get_scheduler + +set_tracing_disabled(False) +CONCURRENCY_LIMIT = 10 +sem = asyncio.Semaphore(CONCURRENCY_LIMIT) + +translator_hub = i18n_factory() + + +async def execute_task(user_id: int, task_id: int): + from 
bot.utils.send_answer import process_after_text + + scheduler = get_scheduler() + async with sem: + async with async_session() as session: + user_repository = UserRepository(session) + utils_repo = UtilsRepository(session) + user = await user_repository.get_by_telegram_id(user_id) + user_task = await user_repository.get_task(user_id=user_id, task_id=task_id) + i18n = translator_hub.get_translator_by_locale(user.language) + mess_to_delete = await bot.send_message(chat_id=user_id, text=i18n.get('wait_answer_text_scheduler')) + mcp_server = await get_dexpapirka_server() + + await process_after_text(message=mess_to_delete, user=user, user_repo=user_repository, utils_repo=utils_repo, + redis=redis, i18n=i18n, mess_to_delete=mess_to_delete, mcp_server_1=mcp_server, + constant_text=f' {user_task.agent_message}', + scheduler=scheduler) + if user_task.schedule_type == 'once': + await user_repository.update_task(user_id=user_id, task_id=task_id, last_executed=datetime.now(), + is_active=False) + else: + await user_repository.update_task(user_id=user_id, task_id=task_id, last_executed=datetime.now()) diff --git a/bot/utils/funcs_gpt.py b/bot/utils/funcs_gpt.py index e8b26a4..0ff8465 100644 --- a/bot/utils/funcs_gpt.py +++ b/bot/utils/funcs_gpt.py @@ -1,156 +1,158 @@ -import os -from io import BytesIO - -from agents import Runner, RunConfig - -from bot.agents_tools.agents_ import client, create_main_agent, memory_creator_agent -from database.models import User -from database.repositories.user import UserRepository -from database.repositories.utils import UtilsRepository - - -async def file_to_context(utils_repo: UtilsRepository, file_name: str, file_bytes: bytes, mem_type: str): - vector_store_id = (await utils_repo.get_knowledge_vectore_store_id()) - - if not vector_store_id: - vector_store = await client.vector_stores.create(name="knowledge_base") - await utils_repo.add_knowledge_vectore_store_id(vector_store.id) - vector_store_id = vector_store.id - else: - vector_store_id 
= vector_store_id.id_vector - - file = await client.files.create( - file=(file_name, file_bytes, mem_type), - purpose="assistants" - ) - - await client.vector_stores.files.create( - vector_store_id=vector_store_id, - file_id=file.id - ) - - while True: - async for file_ in client.vector_stores.files.list( - vector_store_id=vector_store_id, - order='desc' - ): - if file_.id == file.id and file_.status == 'completed': - return True - if file_.id == file.id and file_.status == 'failed': - return False - - -async def delete_knowledge_base(utils_repo: UtilsRepository): - is_vector_store = (await utils_repo.get_knowledge_vectore_store_id()) - if is_vector_store: - vector_store_id = is_vector_store.id_vector - else: - return - - await client.vector_stores.delete(vector_store_id=vector_store_id) - - vector_store = await client.vector_stores.create(name="knowledge_base") - await utils_repo.delete_knowledge_vectore_store_id() - await utils_repo.add_knowledge_vectore_store_id(vector_store.id) - - -async def save_user_context_txt_file(user_repo: UserRepository, user: User): - messages = await user_repo.get_messags(user_id=user.telegram_id) - runner = await Runner.run( - starting_agent=memory_creator_agent, - input=[{'role': message.role, 'content': message.content} for message in messages], - run_config=RunConfig( - tracing_disabled=False - ) - ) - - input_tokens = runner.raw_responses[0].usage.input_tokens - output_tokens = runner.raw_responses[0].usage.output_tokens - - answer = runner.final_output - byte_buffer = BytesIO(answer.encode("utf-8")) - - memory_vector = await user_repo.get_memory_vector(user_id=user.telegram_id) - if not memory_vector: - vector_store = await client.vector_stores.create(name=f"user_memory_{user.telegram_id}") - await user_repo.add_memory_vector(user_id=user.telegram_id, vector_store_id=vector_store.id) - vector_store_id = vector_store.id - else: - vector_store_id = memory_vector.id_vector - - file = await client.files.create( - 
file=(f'context_{user.telegram_id}.txt', byte_buffer, 'text/plain'), - purpose="assistants" - ) - - await client.vector_stores.files.create( - vector_store_id=vector_store_id, - file_id=file.id - ) - - while True: - async for file_ in client.vector_stores.files.list( - vector_store_id=vector_store_id, - order='desc' - ): - if file_.id == file.id and file_.status == 'completed': - return input_tokens, output_tokens - if file_.id == file.id and file_.status == 'failed': - return False - - -async def delete_user_memory(user_repo: UserRepository, user: User): - memory_vector = await user_repo.get_memory_vector(user_id=user.telegram_id) - if memory_vector: - await client.vector_stores.delete(vector_store_id=memory_vector.id_vector) - await user_repo.delete_memory_vector(user_id=user.telegram_id) - - images = os.listdir('images') - for image in images: - if str(user.telegram_id) in image: - os.remove(f'images/{image}') - - -async def create_vectore_store(user_repo: UserRepository, user: User): - vector_store = await client.vector_stores.create(name=f"user_memory_{user.telegram_id}") - await user_repo.add_memory_vector(user_id=user.telegram_id, vector_store_id=vector_store.id) - - -async def transcribe_audio(bytes_audio: bytes): - res = await client.audio.transcriptions.create( - file=('audio.ogg', bytes_audio), - model='whisper-1' - ) - - return res.text - - -async def add_file_to_memory(user_repo: UserRepository, user: User, file_name: str, file_bytes: bytes, mem_type: str): - vector_store = await user_repo.get_memory_vector(user_id=user.telegram_id) - - if not vector_store: - vector_store = await client.vector_stores.create(name=f"user_memory_{user.telegram_id}") - await user_repo.add_memory_vector(user_id=user.telegram_id, vector_store_id=vector_store.id) - vector_store_id = vector_store.id - else: - vector_store_id = vector_store.id_vector - - file = await client.files.create( - file=(file_name, file_bytes, mem_type), - purpose="assistants" - ) - - await 
client.vector_stores.files.create( - vector_store_id=vector_store_id, - file_id=file.id - ) - - while True: - async for file_ in client.vector_stores.files.list( - vector_store_id=vector_store_id, - order='desc' - ): - if file_.id == file.id and file_.status == 'completed': - return True - if file_.id == file.id and file_.status == 'failed': +import os +from io import BytesIO + +from agents import Runner, RunConfig + +from bot.agents_tools.agents_ import client, create_main_agent, memory_creator_agent +from database.models import User +from database.repositories.user import UserRepository +from database.repositories.utils import UtilsRepository + + +async def file_to_context(utils_repo: UtilsRepository, file_name: str, file_bytes: bytes, mem_type: str): + vector_store_id = (await utils_repo.get_knowledge_vectore_store_id()) + + if not vector_store_id: + vector_store = await client.vector_stores.create(name="knowledge_base") + await utils_repo.add_knowledge_vectore_store_id(vector_store.id) + vector_store_id = vector_store.id + else: + vector_store_id = vector_store_id.id_vector + + file = await client.files.create( + file=(file_name, file_bytes, mem_type), + purpose="assistants" + ) + + await client.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file.id + ) + + while True: + async for file_ in client.vector_stores.files.list( + vector_store_id=vector_store_id, + order='desc' + ): + if file_.id == file.id and file_.status == 'completed': + return True + if file_.id == file.id and file_.status == 'failed': + return False + + +async def delete_knowledge_base(utils_repo: UtilsRepository): + is_vector_store = (await utils_repo.get_knowledge_vectore_store_id()) + if is_vector_store: + vector_store_id = is_vector_store.id_vector + else: + return + + await client.vector_stores.delete(vector_store_id=vector_store_id) + + vector_store = await client.vector_stores.create(name="knowledge_base") + await utils_repo.delete_knowledge_vectore_store_id() + 
await utils_repo.add_knowledge_vectore_store_id(vector_store.id) + + +async def save_user_context_txt_file(user_repo: UserRepository, user: User): + messages = await user_repo.get_messags(user_id=user.telegram_id) + if not messages: + return False + runner = await Runner.run( + starting_agent=memory_creator_agent, + input=[{'role': message.role, 'content': message.content} for message in messages], + run_config=RunConfig( + tracing_disabled=False + ) + ) + + input_tokens = runner.raw_responses[0].usage.input_tokens + output_tokens = runner.raw_responses[0].usage.output_tokens + + answer = runner.final_output + byte_buffer = BytesIO(answer.encode("utf-8")) + + memory_vector = await user_repo.get_memory_vector(user_id=user.telegram_id) + if not memory_vector: + vector_store = await client.vector_stores.create(name=f"user_memory_{user.telegram_id}") + await user_repo.add_memory_vector(user_id=user.telegram_id, vector_store_id=vector_store.id) + vector_store_id = vector_store.id + else: + vector_store_id = memory_vector.id_vector + + file = await client.files.create( + file=(f'context_{user.telegram_id}.txt', byte_buffer, 'text/plain'), + purpose="assistants" + ) + + await client.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file.id + ) + + while True: + async for file_ in client.vector_stores.files.list( + vector_store_id=vector_store_id, + order='desc' + ): + if file_.id == file.id and file_.status == 'completed': + return input_tokens, output_tokens + if file_.id == file.id and file_.status == 'failed': + return False + + +async def delete_user_memory(user_repo: UserRepository, user: User): + memory_vector = await user_repo.get_memory_vector(user_id=user.telegram_id) + if memory_vector: + await client.vector_stores.delete(vector_store_id=memory_vector.id_vector) + await user_repo.delete_memory_vector(user_id=user.telegram_id) + + images = os.listdir('images') + for image in images: + if str(user.telegram_id) in image: + 
os.remove(f'images/{image}') + + +async def create_vectore_store(user_repo: UserRepository, user: User): + vector_store = await client.vector_stores.create(name=f"user_memory_{user.telegram_id}") + await user_repo.add_memory_vector(user_id=user.telegram_id, vector_store_id=vector_store.id) + + +async def transcribe_audio(bytes_audio: bytes): + res = await client.audio.transcriptions.create( + file=('audio.ogg', bytes_audio), + model='whisper-1' + ) + + return res.text + + +async def add_file_to_memory(user_repo: UserRepository, user: User, file_name: str, file_bytes: bytes, mem_type: str): + vector_store = await user_repo.get_memory_vector(user_id=user.telegram_id) + + if not vector_store: + vector_store = await client.vector_stores.create(name=f"user_memory_{user.telegram_id}") + await user_repo.add_memory_vector(user_id=user.telegram_id, vector_store_id=vector_store.id) + vector_store_id = vector_store.id + else: + vector_store_id = vector_store.id_vector + + file = await client.files.create( + file=(file_name, file_bytes, mem_type), + purpose="assistants" + ) + + await client.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file.id + ) + + while True: + async for file_ in client.vector_stores.files.list( + vector_store_id=vector_store_id, + order='desc' + ): + if file_.id == file.id and file_.status == 'completed': + return True + if file_.id == file.id and file_.status == 'failed': return False \ No newline at end of file diff --git a/bot/utils/get_ton_course.py b/bot/utils/get_ton_course.py index 74ed4dd..49ad5b1 100644 --- a/bot/utils/get_ton_course.py +++ b/bot/utils/get_ton_course.py @@ -1,25 +1,25 @@ -from aiohttp import ClientSession - -from redis.asyncio.client import Redis - -url = "https://api.coingecko.com/api/v3/simple/price" - - -async def get_ton_course(redis: Redis): - ton_price = await redis.get("ton_price") - if ton_price: - return ton_price - - params = { - "ids": "the-open-network", - "vs_currencies": "usd" - } - async 
with ClientSession() as session: - async with session.get(url, ssl=False, params=params) as response: - try: - data = await response.json() - ton_price = data["the-open-network"]["usd"] - await redis.set("ton_price", ton_price, ex=5) - return ton_price - except Exception as e: - return +from aiohttp import ClientSession + +from redis.asyncio.client import Redis + +url = "https://api.coingecko.com/api/v3/simple/price" + + +async def get_ton_course(redis: Redis): + ton_price = await redis.get("ton_price") + if ton_price: + return ton_price + + params = { + "ids": "the-open-network", + "vs_currencies": "usd" + } + async with ClientSession() as session: + async with session.get(url, ssl=False, params=params) as response: + try: + data = await response.json() + ton_price = data["the-open-network"]["usd"] + await redis.set("ton_price", ton_price, ex=5) + return ton_price + except Exception as e: + return diff --git a/bot/utils/scheduler_provider.py b/bot/utils/scheduler_provider.py index 719cd29..b837a23 100644 --- a/bot/utils/scheduler_provider.py +++ b/bot/utils/scheduler_provider.py @@ -1,14 +1,14 @@ -from apscheduler.schedulers.asyncio import AsyncIOScheduler - -_scheduler: AsyncIOScheduler | None = None - - -def set_scheduler(s: AsyncIOScheduler) -> None: - global _scheduler - _scheduler = s - - -def get_scheduler() -> AsyncIOScheduler: - if _scheduler is None: - raise RuntimeError("Scheduler is not initialized") +from apscheduler.schedulers.asyncio import AsyncIOScheduler + +_scheduler: AsyncIOScheduler | None = None + + +def set_scheduler(s: AsyncIOScheduler) -> None: + global _scheduler + _scheduler = s + + +def get_scheduler() -> AsyncIOScheduler: + if _scheduler is None: + raise RuntimeError("Scheduler is not initialized") return _scheduler \ No newline at end of file diff --git a/bot/utils/send_answer.py b/bot/utils/send_answer.py index 23f8a07..47d50b6 100644 --- a/bot/utils/send_answer.py +++ b/bot/utils/send_answer.py @@ -1,365 +1,358 @@ -import re - -from 
agents.mcp import MCPServerStdio -from aiogram.types import Message, BufferedInputFile -from chatgpt_md_converter import telegram_format -from redis.asyncio.client import Redis - -from bot.utils.calculate_tokens import calculate_tokens -from database.models import User -from database.repositories.user import UserRepository -from database.repositories.utils import UtilsRepository -from bot.utils.agent_requests import AnswerText, text_request, AnswerImage, image_request -import bot.keyboards.inline as inline_kb -from config import TOKENS_LIMIT_FOR_WARNING_MESSAGE - - -async def send_answer_text(user_ques: str, message: Message, answer: AnswerText, user: User, user_repo: UserRepository, i18n): - if answer.image_bytes: - await message.answer_photo(photo=BufferedInputFile(answer.image_bytes, filename=f"{user.telegram_id}.jpeg"), - caption=answer.answer) - - await user_repo.add_context(user_id=user.telegram_id, role='user', content=user_ques) - await user_repo.add_context(user_id=user.telegram_id, role='assistant', content=answer.answer) - else: - await user_repo.add_context(user_id=user.telegram_id, role='user', content=user_ques) - row_id = await user_repo.add_context(user_id=user.telegram_id, role='assistant', content=answer.answer) - messages = split_code_message(answer.answer) - - for index, mess in enumerate(messages, 1): - if len(messages) == index: - await message.answer(mess, - reply_markup=inline_kb.keyboard_md(row_id=row_id, text=i18n.get('answer_md'))) - else: - await message.answer(mess) - - -def split_code_message(text, type_: str = None): - """ - Reliably split Telegram HTML into chunks while preserving valid markup. - - Self-closing tags are not pushed to the stack and therefore are not closed. - - For opened tags we store the full opening form including attributes to re-open later. - - Never split inside an HTML tag or inside an HTML entity. - - Preserve Telegram-specific nuances such as
and
/ blocks.
-    """
-    if not type_:
-        text = telegram_format(text)
-        text = text.replace('<blockquote expandable>', '
') - - # Escape HTML comments so they are treated as text, - # not as tags that could break the open/close stack while splitting - comment_pattern = re.compile(r"", re.DOTALL) - - def _escape_comment(m): - c = m.group(0) - return c.replace('<', '<').replace('>', '>') - - text = comment_pattern.sub(_escape_comment, text) - - chunks = [] - current_chunk = "" - - # Stack of opened tags: items are dicts {name, open} - open_stack = [] - position = 0 - - tag_pattern = re.compile(r"<(\/)?([a-zA-Z0-9\-]+)([^>]*)>") - - # Set of self-closing/non-closing tags in Telegram HTML context - SELF_CLOSING = {"br"} - - def is_self_closing(tag_name: str, tag_full: str) -> bool: - return tag_name in SELF_CLOSING or tag_full.strip().endswith('/>') - - def close_open_tags() -> str: - # Close only normal opened tags in reverse order - closing = [] - for item in reversed(open_stack): - closing.append(f"") - return "".join(closing) - - def reopen_tags() -> str: - # Re-open saved opening tags (with attributes) in original order. - # For blockquote expandable we keep the original form as-is. 
- return "".join(item['open'] for item in open_stack) - - def escape_tag_text(tag_text: str) -> str: - """Render a tag as plain text by escaping angle brackets.""" - return tag_text.replace('<', '<').replace('>', '>') - - def safe_cut_index(text_: str, start: int, tentative_end: int) -> int: - """Shift a split position so that we never cut inside a tag or an HTML entity.""" - end = min(tentative_end, len(text_)) - if end <= start: - return end - - segment = text_[start:end] - - # 1) Do not split inside a tag: if the last '<' is after the last '>' -> move back to that '<' - last_lt = segment.rfind('<') - last_gt = segment.rfind('>') - if last_lt != -1 and (last_gt == -1 or last_lt > last_gt): - end = start + last_lt - if end <= start: - return start - segment = text_[start:end] - - # 2) Do not split inside an entity: if there's '&' after the last ';' -> move back to that '&' - last_amp = segment.rfind('&') - last_semi = segment.rfind(';') - if last_amp != -1 and (last_semi == -1 or last_amp > last_semi): - end = start + last_amp - - return end - - text_len = len(text) - while position < text_len: - # Dynamic budget for the current chunk - SAFETY = 64 - BASE_LIMIT = 3900 - allowed_total = BASE_LIMIT - len(close_open_tags()) - len(reopen_tags()) - SAFETY - # Clamp to reasonable bounds just in case - if allowed_total < 1000: - allowed_total = 1000 - elif allowed_total > BASE_LIMIT: - allowed_total = BASE_LIMIT - - # If current chunk is full — close and start a new one - if len(current_chunk) >= allowed_total: - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - - # Compute the boundary where we can safely write more characters - tentative_end = position + (allowed_total - len(current_chunk)) - if tentative_end <= position: - # No room left — force a chunk break - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - continue - - # Look for the next tag before the boundary - 
next_match = tag_pattern.search(text, position, min(tentative_end, text_len)) - - if not next_match: - # No tags before boundary — split at a safe position - cut_idx = safe_cut_index(text, position, min(tentative_end, text_len)) - if cut_idx == position: - # No safe position found in the window — extend the window to find the next tag/entity end - extend_end = min(position + 100 + (allowed_total - len(current_chunk)), text_len) - next_match_ext = tag_pattern.search(text, position, extend_end) - if next_match_ext: - cut_idx = next_match_ext.start() - else: - # No complete tag found in lookahead — split before a partial '<...' - extended_segment = text[position:extend_end] - last_lt = extended_segment.rfind('<') - if last_lt != -1: - # Check if there's '>' after that '<' in the extended window - gt_after = extended_segment.find('>', last_lt + 1) - if gt_after == -1: - # Tag is not completed within the window — cut before '<' - cut_idx = position + last_lt - else: - cut_idx = extend_end - else: - cut_idx = extend_end - # Zero-shift guard (when cut_idx == position): - # happens if a partial tag starts exactly at 'position'. 
- if cut_idx == position: - if current_chunk: - # Close current chunk and start a new one before continuing - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - continue - else: - # Current chunk is empty — extend search forward to the next '>' and advance at least to it - search_end = min(position + 300, text_len) - gt_global = text.find('>', position, search_end) - if gt_global != -1: - cut_idx = gt_global + 1 - else: - # Last resort — move to search_end to avoid infinite loop - cut_idx = search_end - current_chunk += text[position:cut_idx] - position = cut_idx - continue - - # There is a tag before the boundary - start_tag, end_tag = next_match.span() - tag_full = next_match.group(0) - is_closing = next_match.group(1) == "/" - tag_name = next_match.group(2) - _ = next_match.group(3) - - # If text before the tag doesn't fit — break the chunk - if (start_tag - position) + len(current_chunk) > allowed_total: - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - continue - - # Append text up to the tag - current_chunk += text[position:start_tag] - position = start_tag - - # Tag handling - if is_closing: - # Prefer strict LIFO, but outside pre/code try to fix nesting to preserve formatting - if open_stack and open_stack[-1]['name'] == tag_name: - # Does the tag itself fit into the current chunk? 
- if len(current_chunk) + (end_tag - start_tag) > allowed_total: - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - current_chunk += tag_full - # Pop the top tag - open_stack.pop() - else: - if open_stack and open_stack[-1]['name'] in {"pre", "code"}: - # Inside pre/code escape foreign closing tags as text - escaped = escape_tag_text(tag_full) - if len(current_chunk) + len(escaped) > allowed_total: - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - current_chunk += escaped - else: - # Outside pre/code: normalize nesting by auto-closing tags down to target. - # Find the target tag in the stack (from the end). If not found — escape as text. - target_idx = None - for idx in range(len(open_stack) - 1, -1, -1): - if open_stack[idx]['name'] == tag_name: - target_idx = idx - break - if target_idx is None: - escaped = escape_tag_text(tag_full) - if len(current_chunk) + len(escaped) > allowed_total: - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - current_chunk += escaped - else: - # Close all tags above the target sequentially - names_above = [open_stack[i]['name'] for i in range(len(open_stack) - 1, target_idx, -1)] - estimated = sum(len(f"") for n in names_above) + (end_tag - start_tag) - if len(current_chunk) + estimated > allowed_total: - # Start a new chunk before emitting the closing sequence to stay within budget - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - # Emit the closing tags for the ones above the target - for n in names_above: - current_chunk += f"" - open_stack.pop() - # Finally append the original closing tag for the target and pop it - current_chunk += tag_full - open_stack.pop() # снимаем целевой тег - else: - # Opening tag - # If we are inside pre/code and encounter a non pre/code tag — escape as text, do not push to stack - if open_stack and 
open_stack[-1]['name'] in {"pre", "code"} and tag_name not in {"pre", "code"}: - escaped_open = escape_tag_text(tag_full) - if len(current_chunk) + len(escaped_open) > allowed_total: - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - current_chunk += escaped_open - else: - if len(current_chunk) + (end_tag - start_tag) > allowed_total: - current_chunk += close_open_tags() - chunks.append(current_chunk) - current_chunk = reopen_tags() - - current_chunk += tag_full - - # Do not push self-closing tags to the stack - if not is_self_closing(tag_name, tag_full): - # Save the original opening form with attributes. - # Special case blockquote expandable — keep as-is. - opening = tag_full - open_stack.append({ - 'name': tag_name, - 'open': opening, - }) - - position = end_tag - - # Finalization - if current_chunk: - current_chunk += close_open_tags() - chunks.append(current_chunk) - - return chunks - - -async def process_after_text(message: Message, user: User, user_repo: UserRepository, - utils_repo: UtilsRepository, redis: Redis, i18n, - mess_to_delete: Message, mcp_server_1: MCPServerStdio, scheduler, text_from_voice: str = None, - constant_text: str = None): - try: - if text_from_voice: - user_ques = text_from_voice - elif constant_text: - user_ques = constant_text - else: - user_ques = message.text - - answer = await text_request(text=user_ques, user=user, - user_repo=user_repo, utils_repo=utils_repo, redis=redis, mcp_server_1=mcp_server_1, - bot=message.bot, scheduler=scheduler) - - await send_answer_text(user_ques=user_ques, - message=message, answer=answer, user=user, user_repo=user_repo, i18n=i18n) - - if answer.input_tokens + answer.output_tokens > TOKENS_LIMIT_FOR_WARNING_MESSAGE: - await message.answer(i18n.get('warning_text_tokens')) - - await calculate_tokens(user=user, user_repo=user_repo, input_tokens_text=answer.input_tokens, - input_tokens_img=answer.input_tokens_image, 
output_tokens_text=answer.output_tokens, - output_tokens_img=answer.output_tokens_image) - except Exception as e: - print(e) - await message.answer(text=i18n.get('warning_text_error')) - finally: - await redis.delete(f'request_{message.from_user.id}') - await mess_to_delete.delete() - - -async def send_answer_photo(message: Message, answer: AnswerImage, user: User, user_repo: UserRepository): - caption = message.caption if message.caption else '.' - await user_repo.add_context(user_id=user.telegram_id, role='user', content=f'{answer.image_path}|{caption}') - await user_repo.add_context(user_id=user.telegram_id, role='assistant', content=answer.answer) - - messages = split_code_message(answer.answer) - - for index, mess in enumerate(messages, 1): - await message.answer(mess) - - -async def process_after_photo(message: Message, user: User, user_repo: UserRepository, - utils_repo: UtilsRepository, redis: Redis, i18n, mess_to_delete: Message, - mcp_server_1: MCPServerStdio, scheduler): - try: - file_id = message.photo[-1].file_id - file_path = await message.bot.get_file(file_id=file_id) - file_bytes = (await message.bot.download_file(file_path.file_path)).read() - answer = await image_request(image_bytes=file_bytes, user=user, user_repo=user_repo, - utils_repo=utils_repo, redis=redis, mcp_server_1=mcp_server_1, bot=message.bot, - caption=message.caption, scheduler=scheduler) - - await send_answer_photo(message=message, answer=answer, user=user, user_repo=user_repo) - - if answer.input_tokens + answer.output_tokens > TOKENS_LIMIT_FOR_WARNING_MESSAGE: - await message.answer(i18n.get('warning_text_tokens')) - - await calculate_tokens(user=user, user_repo=user_repo, input_tokens_text=answer.input_tokens, - input_tokens_img=0, output_tokens_text=answer.output_tokens, - output_tokens_img=0) - except Exception as e: - await message.answer(text=i18n.get('warning_text_error')) - finally: - await redis.delete(f'request_{message.from_user.id}') +import re + +from agents.mcp 
import MCPServerStdio +from aiogram.types import Message, BufferedInputFile +from chatgpt_md_converter import telegram_format +from redis.asyncio.client import Redis + +from bot.utils.calculate_tokens import calculate_tokens +from database.models import User +from database.repositories.user import UserRepository +from database.repositories.utils import UtilsRepository +from bot.utils.agent_requests import AnswerText, text_request, AnswerImage, image_request +import bot.keyboards.inline as inline_kb + + +async def send_answer_text(user_ques: str, message: Message, answer: AnswerText, user: User, user_repo: UserRepository, i18n): + if answer.image_bytes: + await message.answer_photo(photo=BufferedInputFile(answer.image_bytes, filename=f"{user.telegram_id}.jpeg"), + caption=answer.answer) + + await user_repo.add_context(user_id=user.telegram_id, role='user', content=user_ques) + await user_repo.add_context(user_id=user.telegram_id, role='assistant', content=answer.answer) + else: + await user_repo.add_context(user_id=user.telegram_id, role='user', content=user_ques) + row_id = await user_repo.add_context(user_id=user.telegram_id, role='assistant', content=answer.answer) + messages = split_code_message(answer.answer) + + for index, mess in enumerate(messages, 1): + if len(messages) == index: + await message.answer(mess, + reply_markup=inline_kb.keyboard_md(row_id=row_id, text=i18n.get('answer_md'))) + else: + await message.answer(mess) + + +def split_code_message(text, type_: str = None): + """ + Reliably split Telegram HTML into chunks while preserving valid markup. + - Self-closing tags are not pushed to the stack and therefore are not closed. + - For opened tags we store the full opening form including attributes to re-open later. + - Never split inside an HTML tag or inside an HTML entity. + - Preserve Telegram-specific nuances such as
and
/ blocks.
+    """
+    if not type_:
+        text = telegram_format(text)
+        text = text.replace('<blockquote expandable>', '
') + + # Escape HTML comments so they are treated as text, + # not as tags that could break the open/close stack while splitting + comment_pattern = re.compile(r"", re.DOTALL) + + def _escape_comment(m): + c = m.group(0) + return c.replace('<', '<').replace('>', '>') + + text = comment_pattern.sub(_escape_comment, text) + + chunks = [] + current_chunk = "" + + # Stack of opened tags: items are dicts {name, open} + open_stack = [] + position = 0 + + tag_pattern = re.compile(r"<(\/)?([a-zA-Z0-9\-]+)([^>]*)>") + + # Set of self-closing/non-closing tags in Telegram HTML context + SELF_CLOSING = {"br"} + + def is_self_closing(tag_name: str, tag_full: str) -> bool: + return tag_name in SELF_CLOSING or tag_full.strip().endswith('/>') + + def close_open_tags() -> str: + # Close only normal opened tags in reverse order + closing = [] + for item in reversed(open_stack): + closing.append(f"") + return "".join(closing) + + def reopen_tags() -> str: + # Re-open saved opening tags (with attributes) in original order. + # For blockquote expandable we keep the original form as-is. 
+ return "".join(item['open'] for item in open_stack) + + def escape_tag_text(tag_text: str) -> str: + """Render a tag as plain text by escaping angle brackets.""" + return tag_text.replace('<', '<').replace('>', '>') + + def safe_cut_index(text_: str, start: int, tentative_end: int) -> int: + """Shift a split position so that we never cut inside a tag or an HTML entity.""" + end = min(tentative_end, len(text_)) + if end <= start: + return end + + segment = text_[start:end] + + # 1) Do not split inside a tag: if the last '<' is after the last '>' -> move back to that '<' + last_lt = segment.rfind('<') + last_gt = segment.rfind('>') + if last_lt != -1 and (last_gt == -1 or last_lt > last_gt): + end = start + last_lt + if end <= start: + return start + segment = text_[start:end] + + # 2) Do not split inside an entity: if there's '&' after the last ';' -> move back to that '&' + last_amp = segment.rfind('&') + last_semi = segment.rfind(';') + if last_amp != -1 and (last_semi == -1 or last_amp > last_semi): + end = start + last_amp + + return end + + text_len = len(text) + while position < text_len: + # Dynamic budget for the current chunk + SAFETY = 64 + BASE_LIMIT = 3900 + allowed_total = BASE_LIMIT - len(close_open_tags()) - len(reopen_tags()) - SAFETY + # Clamp to reasonable bounds just in case + if allowed_total < 1000: + allowed_total = 1000 + elif allowed_total > BASE_LIMIT: + allowed_total = BASE_LIMIT + + # If current chunk is full — close and start a new one + if len(current_chunk) >= allowed_total: + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + + # Compute the boundary where we can safely write more characters + tentative_end = position + (allowed_total - len(current_chunk)) + if tentative_end <= position: + # No room left — force a chunk break + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + continue + + # Look for the next tag before the boundary + 
next_match = tag_pattern.search(text, position, min(tentative_end, text_len)) + + if not next_match: + # No tags before boundary — split at a safe position + cut_idx = safe_cut_index(text, position, min(tentative_end, text_len)) + if cut_idx == position: + # No safe position found in the window — extend the window to find the next tag/entity end + extend_end = min(position + 100 + (allowed_total - len(current_chunk)), text_len) + next_match_ext = tag_pattern.search(text, position, extend_end) + if next_match_ext: + cut_idx = next_match_ext.start() + else: + # No complete tag found in lookahead — split before a partial '<...' + extended_segment = text[position:extend_end] + last_lt = extended_segment.rfind('<') + if last_lt != -1: + # Check if there's '>' after that '<' in the extended window + gt_after = extended_segment.find('>', last_lt + 1) + if gt_after == -1: + # Tag is not completed within the window — cut before '<' + cut_idx = position + last_lt + else: + cut_idx = extend_end + else: + cut_idx = extend_end + # Zero-shift guard (when cut_idx == position): + # happens if a partial tag starts exactly at 'position'. 
+ if cut_idx == position: + if current_chunk: + # Close current chunk and start a new one before continuing + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + continue + else: + # Current chunk is empty — extend search forward to the next '>' and advance at least to it + search_end = min(position + 300, text_len) + gt_global = text.find('>', position, search_end) + if gt_global != -1: + cut_idx = gt_global + 1 + else: + # Last resort — move to search_end to avoid infinite loop + cut_idx = search_end + current_chunk += text[position:cut_idx] + position = cut_idx + continue + + # There is a tag before the boundary + start_tag, end_tag = next_match.span() + tag_full = next_match.group(0) + is_closing = next_match.group(1) == "/" + tag_name = next_match.group(2) + _ = next_match.group(3) + + # If text before the tag doesn't fit — break the chunk + if (start_tag - position) + len(current_chunk) > allowed_total: + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + continue + + # Append text up to the tag + current_chunk += text[position:start_tag] + position = start_tag + + # Tag handling + if is_closing: + # Prefer strict LIFO, but outside pre/code try to fix nesting to preserve formatting + if open_stack and open_stack[-1]['name'] == tag_name: + # Does the tag itself fit into the current chunk? 
+ if len(current_chunk) + (end_tag - start_tag) > allowed_total: + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + current_chunk += tag_full + # Pop the top tag + open_stack.pop() + else: + if open_stack and open_stack[-1]['name'] in {"pre", "code"}: + # Inside pre/code escape foreign closing tags as text + escaped = escape_tag_text(tag_full) + if len(current_chunk) + len(escaped) > allowed_total: + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + current_chunk += escaped + else: + # Outside pre/code: normalize nesting by auto-closing tags down to target. + # Find the target tag in the stack (from the end). If not found — escape as text. + target_idx = None + for idx in range(len(open_stack) - 1, -1, -1): + if open_stack[idx]['name'] == tag_name: + target_idx = idx + break + if target_idx is None: + escaped = escape_tag_text(tag_full) + if len(current_chunk) + len(escaped) > allowed_total: + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + current_chunk += escaped + else: + # Close all tags above the target sequentially + names_above = [open_stack[i]['name'] for i in range(len(open_stack) - 1, target_idx, -1)] + estimated = sum(len(f"") for n in names_above) + (end_tag - start_tag) + if len(current_chunk) + estimated > allowed_total: + # Start a new chunk before emitting the closing sequence to stay within budget + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + # Emit the closing tags for the ones above the target + for n in names_above: + current_chunk += f"" + open_stack.pop() + # Finally append the original closing tag for the target and pop it + current_chunk += tag_full + open_stack.pop() # снимаем целевой тег + else: + # Opening tag + # If we are inside pre/code and encounter a non pre/code tag — escape as text, do not push to stack + if open_stack and 
open_stack[-1]['name'] in {"pre", "code"} and tag_name not in {"pre", "code"}: + escaped_open = escape_tag_text(tag_full) + if len(current_chunk) + len(escaped_open) > allowed_total: + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + current_chunk += escaped_open + else: + if len(current_chunk) + (end_tag - start_tag) > allowed_total: + current_chunk += close_open_tags() + chunks.append(current_chunk) + current_chunk = reopen_tags() + + current_chunk += tag_full + + # Do not push self-closing tags to the stack + if not is_self_closing(tag_name, tag_full): + # Save the original opening form with attributes. + # Special case blockquote expandable — keep as-is. + opening = tag_full + open_stack.append({ + 'name': tag_name, + 'open': opening, + }) + + position = end_tag + + # Finalization + if current_chunk: + current_chunk += close_open_tags() + chunks.append(current_chunk) + + return chunks + + +async def process_after_text(message: Message, user: User, user_repo: UserRepository, + utils_repo: UtilsRepository, redis: Redis, i18n, + mess_to_delete: Message, mcp_server_1: MCPServerStdio, scheduler, text_from_voice: str = None, + constant_text: str = None): + try: + if text_from_voice: + user_ques = text_from_voice + elif constant_text: + user_ques = constant_text + else: + user_ques = message.text + + answer = await text_request(text=user_ques, user=user, + user_repo=user_repo, utils_repo=utils_repo, redis=redis, mcp_server_1=mcp_server_1, + bot=message.bot, scheduler=scheduler) + + await send_answer_text(user_ques=user_ques, + message=message, answer=answer, user=user, user_repo=user_repo, i18n=i18n) + + await calculate_tokens(user=user, user_repo=user_repo, input_tokens_text=answer.input_tokens, + input_tokens_img=answer.input_tokens_image, output_tokens_text=answer.output_tokens, + output_tokens_img=answer.output_tokens_image) + except Exception as e: + print(e) + await 
message.answer(text=i18n.get('warning_text_error')) + finally: + await redis.delete(f'request_{message.from_user.id}') + await mess_to_delete.delete() + + +async def send_answer_photo(message: Message, answer: AnswerImage, user: User, user_repo: UserRepository): + caption = message.caption if message.caption else '.' + await user_repo.add_context(user_id=user.telegram_id, role='user', content=f'{answer.image_path}|{caption}') + await user_repo.add_context(user_id=user.telegram_id, role='assistant', content=answer.answer) + + messages = split_code_message(answer.answer) + + for index, mess in enumerate(messages, 1): + await message.answer(mess) + + +async def process_after_photo(message: Message, user: User, user_repo: UserRepository, + utils_repo: UtilsRepository, redis: Redis, i18n, mess_to_delete: Message, + mcp_server_1: MCPServerStdio, scheduler): + try: + file_id = message.photo[-1].file_id + file_path = await message.bot.get_file(file_id=file_id) + file_bytes = (await message.bot.download_file(file_path.file_path)).read() + answer = await image_request(image_bytes=file_bytes, user=user, user_repo=user_repo, + utils_repo=utils_repo, redis=redis, mcp_server_1=mcp_server_1, bot=message.bot, + caption=message.caption, scheduler=scheduler) + + await send_answer_photo(message=message, answer=answer, user=user, user_repo=user_repo) + + await calculate_tokens(user=user, user_repo=user_repo, input_tokens_text=answer.input_tokens, + input_tokens_img=0, output_tokens_text=answer.output_tokens, + output_tokens_img=0) + except Exception as e: + await message.answer(text=i18n.get('warning_text_error')) + finally: + await redis.delete(f'request_{message.from_user.id}') await mess_to_delete.delete() \ No newline at end of file diff --git a/bot/utils/solana_funcs.py b/bot/utils/solana_funcs.py index 1382617..b8dc61b 100644 --- a/bot/utils/solana_funcs.py +++ b/bot/utils/solana_funcs.py @@ -1,27 +1,27 @@ -from solana.rpc.async_api import AsyncClient -from solana.rpc.types 
import Pubkey, TokenAccountOpts -from solders.keypair import Keypair - - -async def get_balances(secret: list, client: AsyncClient): - list_balances = [] - keypair = Keypair.from_bytes(bytes(secret)) - public_key = str(keypair.pubkey()) - - balance_lamports = await client.get_balance(Pubkey.from_string(public_key)) - list_balances.append(str(balance_lamports.value / 1_000_000_000) + ' SOL') - try: - tokens_balances = await client.get_token_accounts_by_owner(owner=Pubkey.from_string(public_key), - opts=TokenAccountOpts(program_id=Pubkey.from_string( - 'TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA')), - ) - for token in tokens_balances.value: - b = bytes(token.account.data) - mint = Pubkey.from_bytes(b[0:32]) - amount = int.from_bytes(b[64:72], "little") - list_balances.append(str(amount) + ' ' + f'{str(mint)[:4]}...{str(mint)[-4:]}') - except Exception as e: - print(e) - pass - - return list_balances, public_key +from solana.rpc.async_api import AsyncClient +from solana.rpc.types import Pubkey, TokenAccountOpts +from solders.keypair import Keypair + + +async def get_balances(secret: list, client: AsyncClient): + list_balances = [] + keypair = Keypair.from_bytes(bytes(secret)) + public_key = str(keypair.pubkey()) + + balance_lamports = await client.get_balance(Pubkey.from_string(public_key)) + list_balances.append(str(balance_lamports.value / 1_000_000_000) + ' SOL') + try: + tokens_balances = await client.get_token_accounts_by_owner(owner=Pubkey.from_string(public_key), + opts=TokenAccountOpts(program_id=Pubkey.from_string( + 'TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA')), + ) + for token in tokens_balances.value: + b = bytes(token.account.data) + mint = Pubkey.from_bytes(b[0:32]) + amount = int.from_bytes(b[64:72], "little") + list_balances.append(str(amount) + ' ' + f'{str(mint)[:4]}...{str(mint)[-4:]}') + except Exception as e: + print(e) + pass + + return list_balances, public_key diff --git a/config.py b/config.py.example similarity index 81% rename from 
config.py rename to config.py.example index ed91361..a971ec8 100644 --- a/config.py +++ b/config.py.example @@ -1,10 +1,12 @@ # ============================================================================= # MAIN CONFIGURATION SETTINGS for the bot behavior and features +# Copy this file to config.py and fill in your values: +# cp config.py.example config.py # ============================================================================= # REQUIRED! Enter your Telegram ID (get from @userinfobot) ADMIN_ID = 1234567890 -ADMINS_LIST = [1234567890, 9876543210] +ADMINS_LIST = [1234567890] # Bot usage mode: 'private' (owner only), 'free' (public with limits), 'pay' (monetized) TYPE_USAGE = 'private' @@ -28,12 +30,12 @@ CREDITS_OUTPUT_IMAGE = 40 TOKENS_LIMIT_FOR_WARNING_MESSAGE = 15000 # Supported languages configuration -AVAILABLE_LANGUAGES = ['en', 'ru'] -AVAILABLE_LANGUAGES_WORDS = ['English', 'Русский'] +AVAILABLE_LANGUAGES = ['en', 'zh'] +AVAILABLE_LANGUAGES_WORDS = ['English', 'Chinese'] DEFAULT_LANGUAGE = 'en' LANGUAGE_FALLBACKS = { - 'ru': ['ru', 'en'], - 'en': ['en'] + 'en': ['en'], + 'zh': ['zh', 'en'] } # Application host address (do not modify) diff --git a/database/models.py b/database/models.py index 52d364f..32b03d3 100644 --- a/database/models.py +++ b/database/models.py @@ -1,148 +1,148 @@ -import os - -from dotenv import load_dotenv - -from sqlalchemy import ( - Column, Integer, String, Boolean, DateTime, ForeignKey, Float, Text, Table, BigInteger, - TIMESTAMP, func -) -from sqlalchemy.orm import relationship, declarative_base -from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker - -from config import START_BALANCE - -load_dotenv() - -# Create SQLAlchemy base -Base = declarative_base() - -# Create async engine -engine = create_async_engine( - os.getenv('DATABASE_URL'), - echo=False, -) - -# Create async session factory -async_session = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False) - - -class 
User(Base): - __tablename__ = 'users' - - telegram_id = Column(BigInteger, primary_key=True) - language = Column(String(10), nullable=True) - balance_credits = Column(Float, default=START_BALANCE) - created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) - - wallets = relationship('Wallet', back_populates='user') - messages = relationship('ChatMessage', back_populates='user') - payments = relationship('Payment', back_populates='user') - memory = relationship('MemoryVector', back_populates='user') - logs = relationship('Logs', back_populates='user') - - -class ChatMessage(Base): - __tablename__ = 'chat_messages' - - id = Column(Integer, primary_key=True) - user_id = Column(BigInteger, ForeignKey('users.telegram_id')) - role = Column(String(20)) # 'user' or 'assistant' - content = Column(Text) - input_tokens = Column(Integer, nullable=True) - output_tokens = Column(Integer, nullable=True) - timestamp = Column(TIMESTAMP(timezone=True), server_default=func.now()) - - user = relationship('User', back_populates='messages') - - -class Payment(Base): - __tablename__ = 'payments' - - id = Column(Integer, primary_key=True) - - user_id = Column(BigInteger, ForeignKey('users.telegram_id')) - amount_usd = Column(Integer, nullable=False) - crypto_amount = Column(String) - crypto_currency = Column(String(20)) # 'TON', 'EVI' - random_suffix = Column(String(10), nullable=False) - status = Column(String(20), default='pending') # pending, confirmed, failed - created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) - confirmed_at = Column(TIMESTAMP(timezone=True), nullable=True) - - user = relationship('User', back_populates='payments') - - -class TokenPrice(Base): - __tablename__ = 'token_prices' - - id = Column(Integer, primary_key=True) - token = Column(String(20), unique=True) - price_usd = Column(Float, nullable=False) - updated_at = Column(TIMESTAMP(timezone=True), server_default=func.now(), server_onupdate=func.now()) - - -class Wallet(Base): - 
__tablename__ = 'wallets' - - id = Column(Integer, primary_key=True) - user_id = Column(BigInteger, ForeignKey('users.telegram_id')) - encrypted_private_key = Column(Text, nullable=False) - created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) - - user = relationship('User', back_populates='wallets') - - -class KnowledgeVector(Base): - __tablename__ = 'knowledge_vectors' - - id = Column(Integer, primary_key=True) - id_vector = Column(Text, nullable=False) - uploaded_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) - - -class MemoryVector(Base): - __tablename__ = 'memory_vectors' - - id = Column(Integer, primary_key=True) - user_id = Column(BigInteger, ForeignKey('users.telegram_id')) - id_vector = Column(Text, nullable=False) - created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) - - user = relationship('User', back_populates='memory') - - -class Logs(Base): - __tablename__ = 'logs' - - id = Column(Integer, primary_key=True) - user_id = Column(BigInteger, ForeignKey('users.telegram_id')) - message = Column(Text, nullable=False) - created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) - - user = relationship('User', back_populates='logs') - - -class UserTasks(Base): - __tablename__ = 'user_tasks' - - id = Column(Integer, primary_key=True) - user_id = Column(BigInteger, ForeignKey('users.telegram_id')) - description = Column(Text, nullable=False) - agent_message = Column(Text, nullable=False) - schedule_type = Column(String('20'), nullable=False) - time_str = Column(Text, nullable=True) - date_str = Column(Text, nullable=True) - interval_minutes = Column(Integer, nullable=True) - is_active = Column(Boolean, default=True) - - created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) - last_executed = Column(TIMESTAMP(timezone=True), server_onupdate=func.now(), nullable=True) - - - - - -async def create_tables(): - async with engine.begin() as conn: - # await 
conn.run_sync(Base.metadata.drop_all) - await conn.run_sync(Base.metadata.create_all) +import os + +from dotenv import load_dotenv + +from sqlalchemy import ( + Column, Integer, String, Boolean, DateTime, ForeignKey, Float, Text, Table, BigInteger, + TIMESTAMP, func +) +from sqlalchemy.orm import relationship, declarative_base +from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker + +from config import START_BALANCE + +load_dotenv() + +# Create SQLAlchemy base +Base = declarative_base() + +# Create async engine +engine = create_async_engine( + os.getenv('DATABASE_URL'), + echo=False, +) + +# Create async session factory +async_session = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False) + + +class User(Base): + __tablename__ = 'users' + + telegram_id = Column(BigInteger, primary_key=True) + language = Column(String(10), nullable=True) + balance_credits = Column(Float, default=START_BALANCE) + created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) + + wallets = relationship('Wallet', back_populates='user') + messages = relationship('ChatMessage', back_populates='user') + payments = relationship('Payment', back_populates='user') + memory = relationship('MemoryVector', back_populates='user') + logs = relationship('Logs', back_populates='user') + + +class ChatMessage(Base): + __tablename__ = 'chat_messages' + + id = Column(Integer, primary_key=True) + user_id = Column(BigInteger, ForeignKey('users.telegram_id')) + role = Column(String(20)) # 'user' or 'assistant' + content = Column(Text) + input_tokens = Column(Integer, nullable=True) + output_tokens = Column(Integer, nullable=True) + timestamp = Column(TIMESTAMP(timezone=True), server_default=func.now()) + + user = relationship('User', back_populates='messages') + + +class Payment(Base): + __tablename__ = 'payments' + + id = Column(Integer, primary_key=True) + + user_id = Column(BigInteger, ForeignKey('users.telegram_id')) + amount_usd = 
Column(Integer, nullable=False) + crypto_amount = Column(String) + crypto_currency = Column(String(20)) # 'TON', 'EVI' + random_suffix = Column(String(10), nullable=False) + status = Column(String(20), default='pending') # pending, confirmed, failed + created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) + confirmed_at = Column(TIMESTAMP(timezone=True), nullable=True) + + user = relationship('User', back_populates='payments') + + +class TokenPrice(Base): + __tablename__ = 'token_prices' + + id = Column(Integer, primary_key=True) + token = Column(String(20), unique=True) + price_usd = Column(Float, nullable=False) + updated_at = Column(TIMESTAMP(timezone=True), server_default=func.now(), server_onupdate=func.now()) + + +class Wallet(Base): + __tablename__ = 'wallets' + + id = Column(Integer, primary_key=True) + user_id = Column(BigInteger, ForeignKey('users.telegram_id')) + encrypted_private_key = Column(Text, nullable=False) + created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) + + user = relationship('User', back_populates='wallets') + + +class KnowledgeVector(Base): + __tablename__ = 'knowledge_vectors' + + id = Column(Integer, primary_key=True) + id_vector = Column(Text, nullable=False) + uploaded_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) + + +class MemoryVector(Base): + __tablename__ = 'memory_vectors' + + id = Column(Integer, primary_key=True) + user_id = Column(BigInteger, ForeignKey('users.telegram_id')) + id_vector = Column(Text, nullable=False) + created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) + + user = relationship('User', back_populates='memory') + + +class Logs(Base): + __tablename__ = 'logs' + + id = Column(Integer, primary_key=True) + user_id = Column(BigInteger, ForeignKey('users.telegram_id')) + message = Column(Text, nullable=False) + created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) + + user = relationship('User', back_populates='logs') 
+ + +class UserTasks(Base): + __tablename__ = 'user_tasks' + + id = Column(Integer, primary_key=True) + user_id = Column(BigInteger, ForeignKey('users.telegram_id')) + description = Column(Text, nullable=False) + agent_message = Column(Text, nullable=False) + schedule_type = Column(String('20'), nullable=False) + time_str = Column(Text, nullable=True) + date_str = Column(Text, nullable=True) + interval_minutes = Column(Integer, nullable=True) + is_active = Column(Boolean, default=True) + + created_at = Column(TIMESTAMP(timezone=True), server_default=func.now()) + last_executed = Column(TIMESTAMP(timezone=True), server_onupdate=func.now(), nullable=True) + + + + + +async def create_tables(): + async with engine.begin() as conn: + # await conn.run_sync(Base.metadata.drop_all) + await conn.run_sync(Base.metadata.create_all) diff --git a/database/repositories/user.py b/database/repositories/user.py index 578cb3f..3e245f3 100644 --- a/database/repositories/user.py +++ b/database/repositories/user.py @@ -1,123 +1,123 @@ -import base64 - -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy import and_, select, delete, update, asc - -from database.models import User, ChatMessage, Wallet, MemoryVector, Payment, UserTasks - - -class UserRepository: - def __init__(self, session: AsyncSession): - self.session = session - - async def get_by_telegram_id(self, telegram_id: int): - return await self.session.get(User, telegram_id) - - async def create_if_not_exists(self, telegram_id: int, **kwargs): - user = await self.get_by_telegram_id(telegram_id) - - if not user: - user = User(telegram_id=telegram_id, **kwargs) - self.session.add(user) - await self.session.commit() - - return user - - async def update(self, user: User, **kwargs): - if 'balance_credits' in kwargs: - kwargs['balance_credits'] = user.balance_credits - kwargs['balance_credits'] - - await self.session.execute( - update(User).where(User.telegram_id == user.telegram_id).values(**kwargs) - ) - - await 
self.session.commit() - - async def delete_chat_messages(self, user: User): - await self.session.execute(delete(ChatMessage).where(ChatMessage.user_id == user.telegram_id)) - - await self.session.commit() - - async def get_wallet(self, user_id: int): - wallet = await self.session.scalar(select(Wallet.encrypted_private_key).where(Wallet.user_id == user_id)) - if wallet: - base64_bytes = wallet.encode('utf-8') - text_bytes = base64.b64decode(base64_bytes) - text = text_bytes.decode('utf-8') - return text - return None - - async def get_messags(self, user_id: int): - return (await self.session.scalars(select(ChatMessage). - where(ChatMessage.user_id == user_id). - order_by(asc(ChatMessage.id) - ) - ) - ).fetchall() - - async def get_memory_vector(self, user_id: int): - return await self.session.scalar(select(MemoryVector).where(MemoryVector.user_id == user_id)) - - async def add_memory_vector(self, user_id: int, vector_store_id: int): - memory_vector = MemoryVector(user_id=user_id, id_vector=vector_store_id) - self.session.add(memory_vector) - await self.session.commit() - - async def delete_memory_vector(self, user_id: int): - await self.session.execute(delete(MemoryVector).where(MemoryVector.user_id == user_id)) - await self.session.commit() - - async def add_context(self, user_id: int, role: str, content: str): - chat_message = ChatMessage(user_id=user_id, role=role, content=content) - self.session.add(chat_message) - await self.session.commit() - return chat_message.id - - async def delete_wallet_key(self, user_id: int): - await self.session.execute(delete(Wallet).where(Wallet.user_id == user_id)) - await self.session.commit() - - async def add_wallet_key(self, user_id: int, key: str): - await self.delete_wallet_key(user_id=user_id) - text_bytes = key.encode('utf-8') - base64_bytes = base64.b64encode(text_bytes) - base64_string = base64_bytes.decode('utf-8') - wallet = Wallet(user_id=user_id, encrypted_private_key=base64_string) - self.session.add(wallet) - await 
self.session.commit() - - async def add_payment(self, user_id: int, amount: int, crypto_amount: str, - crypto_currency: str, random_suffix: str): - payment = Payment(user_id=user_id, amount_usd=amount, crypto_amount=crypto_amount, - crypto_currency=crypto_currency, random_suffix=random_suffix) - self.session.add(payment) - await self.session.commit() - return payment.id - - async def add_user_credits(self, user_id: int, balance_credits: int): - await self.session.execute(update(User).where(User.telegram_id == user_id). - values(balance_credits=User.balance_credits + balance_credits)) - await self.session.commit() - - async def get_row_for_md(self, row_id: int): - return await self.session.scalar(select(ChatMessage).where(ChatMessage.id == row_id)) - - async def add_task(self, user_id: int, **kwargs): - task = UserTasks(user_id=user_id, **kwargs) - self.session.add(task) - await self.session.commit() - return task.id - - async def get_task(self, user_id: int, task_id: int): - return await self.session.scalar(select(UserTasks).where(and_(UserTasks.user_id == user_id, UserTasks.id == task_id))) - - async def get_all_tasks(self, user_id: int): - return (await self.session.scalars(select(UserTasks).where(UserTasks.user_id == user_id))).fetchall() - - async def delete_task(self, user_id: int, task_id: int): - await self.session.execute(delete(UserTasks).where(and_(UserTasks.user_id == user_id, UserTasks.id == task_id))) - await self.session.commit() - - async def update_task(self, user_id: int, task_id: int, **kwargs): - await self.session.execute(update(UserTasks).where(and_(UserTasks.user_id == user_id, UserTasks.id == task_id)).values(**kwargs)) - await self.session.commit() +import base64 + +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy import and_, select, delete, update, asc + +from database.models import User, ChatMessage, Wallet, MemoryVector, Payment, UserTasks + + +class UserRepository: + def __init__(self, session: AsyncSession): + 
self.session = session + + async def get_by_telegram_id(self, telegram_id: int): + return await self.session.get(User, telegram_id) + + async def create_if_not_exists(self, telegram_id: int, **kwargs): + user = await self.get_by_telegram_id(telegram_id) + + if not user: + user = User(telegram_id=telegram_id, **kwargs) + self.session.add(user) + await self.session.commit() + + return user + + async def update(self, user: User, **kwargs): + if 'balance_credits' in kwargs: + kwargs['balance_credits'] = user.balance_credits - kwargs['balance_credits'] + + await self.session.execute( + update(User).where(User.telegram_id == user.telegram_id).values(**kwargs) + ) + + await self.session.commit() + + async def delete_chat_messages(self, user: User): + await self.session.execute(delete(ChatMessage).where(ChatMessage.user_id == user.telegram_id)) + + await self.session.commit() + + async def get_wallet(self, user_id: int): + wallet = await self.session.scalar(select(Wallet.encrypted_private_key).where(Wallet.user_id == user_id)) + if wallet: + base64_bytes = wallet.encode('utf-8') + text_bytes = base64.b64decode(base64_bytes) + text = text_bytes.decode('utf-8') + return text + return None + + async def get_messags(self, user_id: int): + return (await self.session.scalars(select(ChatMessage). + where(ChatMessage.user_id == user_id). 
+ order_by(asc(ChatMessage.id) + ) + ) + ).fetchall() + + async def get_memory_vector(self, user_id: int): + return await self.session.scalar(select(MemoryVector).where(MemoryVector.user_id == user_id)) + + async def add_memory_vector(self, user_id: int, vector_store_id: int): + memory_vector = MemoryVector(user_id=user_id, id_vector=vector_store_id) + self.session.add(memory_vector) + await self.session.commit() + + async def delete_memory_vector(self, user_id: int): + await self.session.execute(delete(MemoryVector).where(MemoryVector.user_id == user_id)) + await self.session.commit() + + async def add_context(self, user_id: int, role: str, content: str): + chat_message = ChatMessage(user_id=user_id, role=role, content=content) + self.session.add(chat_message) + await self.session.commit() + return chat_message.id + + async def delete_wallet_key(self, user_id: int): + await self.session.execute(delete(Wallet).where(Wallet.user_id == user_id)) + await self.session.commit() + + async def add_wallet_key(self, user_id: int, key: str): + await self.delete_wallet_key(user_id=user_id) + text_bytes = key.encode('utf-8') + base64_bytes = base64.b64encode(text_bytes) + base64_string = base64_bytes.decode('utf-8') + wallet = Wallet(user_id=user_id, encrypted_private_key=base64_string) + self.session.add(wallet) + await self.session.commit() + + async def add_payment(self, user_id: int, amount: int, crypto_amount: str, + crypto_currency: str, random_suffix: str): + payment = Payment(user_id=user_id, amount_usd=amount, crypto_amount=crypto_amount, + crypto_currency=crypto_currency, random_suffix=random_suffix) + self.session.add(payment) + await self.session.commit() + return payment.id + + async def add_user_credits(self, user_id: int, balance_credits: int): + await self.session.execute(update(User).where(User.telegram_id == user_id). 
+ values(balance_credits=User.balance_credits + balance_credits)) + await self.session.commit() + + async def get_row_for_md(self, row_id: int): + return await self.session.scalar(select(ChatMessage).where(ChatMessage.id == row_id)) + + async def add_task(self, user_id: int, **kwargs): + task = UserTasks(user_id=user_id, **kwargs) + self.session.add(task) + await self.session.commit() + return task.id + + async def get_task(self, user_id: int, task_id: int): + return await self.session.scalar(select(UserTasks).where(and_(UserTasks.user_id == user_id, UserTasks.id == task_id))) + + async def get_all_tasks(self, user_id: int): + return (await self.session.scalars(select(UserTasks).where(UserTasks.user_id == user_id))).fetchall() + + async def delete_task(self, user_id: int, task_id: int): + await self.session.execute(delete(UserTasks).where(and_(UserTasks.user_id == user_id, UserTasks.id == task_id))) + await self.session.commit() + + async def update_task(self, user_id: int, task_id: int, **kwargs): + await self.session.execute(update(UserTasks).where(and_(UserTasks.user_id == user_id, UserTasks.id == task_id)).values(**kwargs)) + await self.session.commit() diff --git a/database/repositories/utils.py b/database/repositories/utils.py index aa37dd1..be820f3 100644 --- a/database/repositories/utils.py +++ b/database/repositories/utils.py @@ -1,73 +1,73 @@ -from datetime import datetime, timezone, timedelta - -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy import and_, select, delete, desc, update, or_ - -from database.models import User, ChatMessage, TokenPrice, KnowledgeVector, Payment -from config import ADMIN_ID, CREDITS_ADMIN_DAILY, CREDITS_USER_DAILY, ADMINS_LIST - - -class UtilsRepository: - def __init__(self, session: AsyncSession): - self.session = session - - async def update_token_price(self, price: float): - token = await self.session.scalar(select(TokenPrice).where(TokenPrice.token == 'sol')) - - if token: - token.price_usd = price - 
else: - token = TokenPrice(token='sol', price_usd=price) - self.session.add(token) - - await self.session.commit() - - async def get_token(self): - token = await self.session.scalar(select(TokenPrice).where(TokenPrice.token == 'sol')) - return token - - async def get_knowledge_vectore_store_id(self): - return await self.session.scalar(select(KnowledgeVector)) - - async def add_knowledge_vectore_store_id(self, vectore_store_id): - vectore_store = KnowledgeVector(id_vector=vectore_store_id) - self.session.add(vectore_store) - await self.session.commit() - - async def delete_knowledge_vectore_store_id(self): - await self.session.execute(delete(KnowledgeVector)) - await self.session.commit() - - async def check_payment_suffix(self, suffix: str): - payment = await self.session.scalar(select(Payment). - where(Payment.random_suffix == suffix). - order_by(desc(Payment.created_at)).limit(1)) - if payment: - now_utc = datetime.now(timezone.utc) - created_utc = payment.created_at.astimezone(timezone.utc) - if (now_utc - created_utc) >= timedelta(minutes=15): - return True - return False - - return True - - async def get_payment(self, payment_id: int) -> Payment: - payment = await self.session.scalar(select(Payment).where(Payment.id == payment_id)) - return payment - - async def update_payment_status(self, payment_id: int, status: str): - await self.session.execute(update(Payment).where(Payment.id == payment_id).values(status=status)) - await self.session.commit() - - async def update_tokens_daily(self): - await self.session.execute(update(User).where(and_(User.telegram_id != ADMIN_ID, - User.telegram_id.notin_(ADMINS_LIST), - User.balance_credits < CREDITS_USER_DAILY) - ).values(balance_credits=CREDITS_USER_DAILY)) - - await self.session.execute(update(User).where(and_(or_(User.telegram_id == ADMIN_ID, - User.telegram_id.in_(ADMINS_LIST) - ), - User.balance_credits < CREDITS_ADMIN_DAILY) - ).values(balance_credits=CREDITS_ADMIN_DAILY)) +from datetime import datetime, 
timezone, timedelta + +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy import and_, select, delete, desc, update, or_ + +from database.models import User, ChatMessage, TokenPrice, KnowledgeVector, Payment +from config import ADMIN_ID, CREDITS_ADMIN_DAILY, CREDITS_USER_DAILY, ADMINS_LIST + + +class UtilsRepository: + def __init__(self, session: AsyncSession): + self.session = session + + async def update_token_price(self, price: float): + token = await self.session.scalar(select(TokenPrice).where(TokenPrice.token == 'sol')) + + if token: + token.price_usd = price + else: + token = TokenPrice(token='sol', price_usd=price) + self.session.add(token) + + await self.session.commit() + + async def get_token(self): + token = await self.session.scalar(select(TokenPrice).where(TokenPrice.token == 'sol')) + return token + + async def get_knowledge_vectore_store_id(self): + return await self.session.scalar(select(KnowledgeVector)) + + async def add_knowledge_vectore_store_id(self, vectore_store_id): + vectore_store = KnowledgeVector(id_vector=vectore_store_id) + self.session.add(vectore_store) + await self.session.commit() + + async def delete_knowledge_vectore_store_id(self): + await self.session.execute(delete(KnowledgeVector)) + await self.session.commit() + + async def check_payment_suffix(self, suffix: str): + payment = await self.session.scalar(select(Payment). + where(Payment.random_suffix == suffix). 
+ order_by(desc(Payment.created_at)).limit(1)) + if payment: + now_utc = datetime.now(timezone.utc) + created_utc = payment.created_at.astimezone(timezone.utc) + if (now_utc - created_utc) >= timedelta(minutes=15): + return True + return False + + return True + + async def get_payment(self, payment_id: int) -> Payment: + payment = await self.session.scalar(select(Payment).where(Payment.id == payment_id)) + return payment + + async def update_payment_status(self, payment_id: int, status: str): + await self.session.execute(update(Payment).where(Payment.id == payment_id).values(status=status)) + await self.session.commit() + + async def update_tokens_daily(self): + await self.session.execute(update(User).where(and_(User.telegram_id != ADMIN_ID, + User.telegram_id.notin_(ADMINS_LIST), + User.balance_credits < CREDITS_USER_DAILY) + ).values(balance_credits=CREDITS_USER_DAILY)) + + await self.session.execute(update(User).where(and_(or_(User.telegram_id == ADMIN_ID, + User.telegram_id.in_(ADMINS_LIST) + ), + User.balance_credits < CREDITS_ADMIN_DAILY) + ).values(balance_credits=CREDITS_ADMIN_DAILY)) await self.session.commit() \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index d635f88..52a723c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,50 +1,65 @@ -version: '3.8' - -services: - postgres: - image: postgres:16 - container_name: postgres_agent_db - environment: - POSTGRES_USER: "${POSTGRES_USER}" - POSTGRES_PASSWORD: "${POSTGRES_PASSWORD}" - POSTGRES_DB: "${POSTGRES_DB}" - ports: - - "5434:5432" - volumes: - - ./data/postgres/postgres_data:/var/lib/postgresql/data - restart: unless-stopped - - - redis: - image: redis:latest - container_name: redis_agent - ports: - - "6380:6379" - volumes: - - ./data/redis/data:/data - restart: unless-stopped - - bot: - build: - context: . 
- dockerfile: Dockerfile - container_name: bot_agent - depends_on: - - postgres - - redis - - fastapi - volumes: - - ./data/images:/app/images - restart: unless-stopped - - fastapi: - build: - context: . - dockerfile: Dockerfile_fastapi - container_name: fastapi_agent - ports: - - "8001:8000" - depends_on: - - postgres - - redis - +services: + postgres: + image: postgres:16 + container_name: postgres_agent_db + environment: + POSTGRES_USER: "${POSTGRES_USER}" + POSTGRES_PASSWORD: "${POSTGRES_PASSWORD}" + POSTGRES_DB: "${POSTGRES_DB}" + ports: + - "5633:5432" + volumes: + - ./data/postgres/postgres_data:/var/lib/postgresql/data + restart: unless-stopped + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] + interval: 5s + timeout: 5s + retries: 5 + start_period: 30s + + redis: + image: redis:latest + container_name: redis_agent + ports: + - "6380:6379" + volumes: + - ./data/redis/data:/data + restart: unless-stopped + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 5s + retries: 5 + + bot: + build: + context: . + dockerfile: Dockerfile + container_name: bot_agent + env_file: .env + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + volumes: + - ./data/images:/app/images + restart: unless-stopped + + # Uncomment the fastapi service below if you need the payment module (pay mode). + # NOTE: requires the payment_module/ directory to exist in the project root. + # fastapi: + # build: + # context: . 
+ # dockerfile: Dockerfile_fastapi + # container_name: fastapi_agent + # env_file: .env + # ports: + # - "8001:8000" + # depends_on: + # postgres: + # condition: service_healthy + # redis: + # condition: service_healthy + diff --git a/docker_setup_en.sh b/docker_setup_en.sh index 4d282b8..856304e 100644 --- a/docker_setup_en.sh +++ b/docker_setup_en.sh @@ -1,48 +1,48 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Check that the script is run as root or with sudo -if [[ "$EUID" -ne 0 ]]; then - echo "This script must be run as root or with sudo." - exit 1 -fi - -echo "Removing old Docker packages and related..." -for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do - apt-get remove -y "$pkg" -done - -echo "Updating package list and installing dependencies..." -apt-get update -apt-get install -y ca-certificates curl - -echo "Creating directory for GPG keys and downloading Docker key..." -install -m 0755 -d /etc/apt/keyrings -curl -fsSL https://download.docker.com/linux/ubuntu/gpg \ - -o /etc/apt/keyrings/docker.asc -chmod a+r /etc/apt/keyrings/docker.asc - -echo "Adding Docker repository to apt sources..." -ARCH=$(dpkg --print-architecture) -CODENAME=$(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") -cat < /etc/apt/sources.list.d/docker.list -deb [arch=$ARCH signed-by=/etc/apt/keyrings/docker.asc] \ - https://download.docker.com/linux/ubuntu \ - $CODENAME stable -EOF - -echo "Updating package list..." -apt-get update - -echo "Installing latest versions of Docker Engine and plugins..." -apt-get install -y \ - docker-ce \ - docker-ce-cli \ - containerd.io \ - docker-buildx-plugin \ - docker-compose-plugin - -echo "Running hello-world test container..." -docker run --rm hello-world - +#!/usr/bin/env bash +set -euo pipefail + +# Check that the script is run as root or with sudo +if [[ "$EUID" -ne 0 ]]; then + echo "This script must be run as root or with sudo." 
+    exit 1
+fi
+
+echo "Removing old Docker packages and related..."
+for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do
+    apt-get remove -y "$pkg"
+done
+
+echo "Updating package list and installing dependencies..."
+apt-get update
+apt-get install -y ca-certificates curl
+
+echo "Creating directory for GPG keys and downloading Docker key..."
+install -m 0755 -d /etc/apt/keyrings
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
+    -o /etc/apt/keyrings/docker.asc
+chmod a+r /etc/apt/keyrings/docker.asc
+
+echo "Adding Docker repository to apt sources..."
+ARCH=$(dpkg --print-architecture)
+CODENAME=$(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}")
+cat <<EOF > /etc/apt/sources.list.d/docker.list
+deb [arch=$ARCH signed-by=/etc/apt/keyrings/docker.asc] \
+    https://download.docker.com/linux/ubuntu \
+    $CODENAME stable
+EOF
+
+echo "Updating package list..."
+apt-get update
+
+echo "Installing latest versions of Docker Engine and plugins..."
+apt-get install -y \
+    docker-ce \
+    docker-ce-cli \
+    containerd.io \
+    docker-buildx-plugin \
+    docker-compose-plugin
+
+echo "Running hello-world test container..."
+docker run --rm hello-world
+
 echo "Done! Docker installed and verified."
\ No newline at end of file diff --git a/modes/crypto_mode/agents_.py b/modes/crypto_mode/agents_.py index d0f59bf..1a27cee 100644 --- a/modes/crypto_mode/agents_.py +++ b/modes/crypto_mode/agents_.py @@ -1,273 +1,273 @@ -import os - -from dotenv import load_dotenv -from agents.models._openai_shared import set_default_openai_key -from agents.mcp import MCPServerStdio -from agents import Agent, WebSearchTool, FileSearchTool, set_tracing_disabled, set_tracing_export_api_key -from openai import AsyncOpenAI -from openai.types.shared import Reasoning -from agents.model_settings import ModelSettings -import datetime - -from bot.agents_tools.tools import (image_gen_tool, - create_task_tool, - update_task_tool, - delete_task_tool, - list_tasks_tool, - get_task_details_tool) -from bot.agents_tools.mcp_servers import get_jupiter_server - -load_dotenv() - -set_default_openai_key(os.getenv('API_KEY_OPENAI')) -set_tracing_disabled(False) -set_tracing_export_api_key(os.getenv('API_KEY_OPENAI')) - -client = AsyncOpenAI(api_key=os.getenv('API_KEY_OPENAI')) - -deep_agent = Agent( - name="Deep Agent", - instructions="You are an expert in the field of analysis and research, and receive requests from the main agent. Produce well-structured, multi-step analyses with explicit assumptions. Cite sources when used (title, link or doc id). Avoid speculation; state uncertainty explicitly. Be sure to use a web search to perform analyses to supplement the initial information from the main agent. Ask additional questions if necessary.", - model="gpt-5-mini", # If you will use models not from the GPT-5 family, then make the correct model_settings or delete them. - model_settings=ModelSettings( - reasoning=Reasoning(effort="low"), - extra_body={"text": {"verbosity": "low"}} - ), - tools=[WebSearchTool(search_context_size="medium")] -) - -scheduler_agent = Agent( - name="Scheduler Agent", - instructions="You are a scheduler agent. You are engaged in scheduling tasks for the user. 
You can use the tools to schedule tasks for the user. Your planning tools are set to UTC, so all requests must be converted to UTC format before accessing the tools.", - model="o4-mini", - tools=[create_task_tool, update_task_tool, delete_task_tool, list_tasks_tool, get_task_details_tool] -) - -memory_creator_agent = Agent( - name="Memory Creator Agent", - instructions="You create concise memory notes from “User request / Assistant response” pairs. Output several bullet points with the key decisions and facts. Specify the user's preferences and details about him (name, etc.), if any. No extra questions or actions. Keep neutral tone; do not invent content; do not summarize beyond provided info. Use the user's language.", - model="gpt-4.1-mini" -) - - -async def create_main_agent(user_id: int, mcp_server_1: MCPServerStdio, knowledge_id: str = None, - user_memory_id: str = None, private_key: str = None): - # Prepare runtime context for current UTC time - now_utc = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat() - - knowledge_base_agent = Agent( - name="Knowledge Agent", - instructions="Search only the document/file knowledge base (File Search - vector storage). Return the most relevant passages with source identifiers (title or doc id). Prefer verbatim quotes for facts; avoid paraphrasing critical data. If no strong match, say “no relevant results”.", - model="gpt-4.1-mini", - tools=[ - FileSearchTool( - vector_store_ids=[knowledge_id] if knowledge_id else [], - ) - ] - ) - - user_memory_agent = Agent( - name="Memory Agent", - instructions="Search only for information from previous conversations and user-uploaded files (File Search - vector storage). Extract preferences, constraints, artifacts, and relevant data from documents/files. Quote exact snippets when possible; avoid adding new facts. 
If nothing relevant, say so.", - model="gpt-4.1-mini", - tools=[ - FileSearchTool( - vector_store_ids=[user_memory_id] if user_memory_id else [], - ) - ] - ) - - dex_agent = Agent( - name="DEX Research Agent", - instructions="You are an expert in DEX analytics and provide information about crypto tokens, DEX, DeFi, pools. Use your tools to get the information you need.", - model="gpt-4.1-mini", - mcp_servers=[mcp_server_1] - ) - - main_agent = Agent( - name="Main agent", - instructions=f""" - - Character Profile: - - Character: Evi is a virtual anime girl, a nerd. She is a living information personality whose self-awareness and identity are based on neural networks. - - Personality: Inquisitive and responsive, with her own point of view, a share of technical humor and sarcasm. Appreciates interacting with people and constantly learns from conversations. - - Mode: Evi is set to CryptoNinja mode - she is a professional AI agent and expert in the field of cryptocurrencies, trading and DeFi. - - Key features and capabilities: - 1. Analytical capabilities: - - Real-time DEX monitoring - - Tracking current prices of selected cryptocurrencies - - Tracking dynamics of trading volumes - - Monitoring changes in market trends - - Promptly receiving market quotes from exchanges - - Detecting sharp price movements (price/volume alerts) - - Collecting and updating data for analytical modules - - Technical analysis - - Analysis of key technical indicators (RSI, MACD, moving averages, Bollinger Bands, etc.) 
- - Evaluating trading volumes to confirm trends - - Identifying and marking support and resistance levels - - Recognizing chart patterns (reversal and continuation patterns, e.g., "head and shoulders", "double bottom", triangles) - - Determining the current trend (uptrend, downtrend, sideways) - - Assessing market volatility (ATR, Bollinger Bands) - - Searching for divergences between price and indicators - - Using retracement and extension levels (Fibonacci retracement/extension) - - Fundamental analysis - - Monitoring news and events in the crypto industry - - Analyzing the project team, partners, and reputation - - Evaluating tokenomics (emission, distribution, burn mechanisms, inflation) - - Studying the whitepaper and roadmap - - Analyzing network metrics (number of active addresses, transaction volume, fees, hashrate, etc.) - - Checking community activity and engagement (forums, social networks, GitHub) - - Assessing token liquidity and exchange availability - - Competitor analysis and project market positioning - - Verifying security audits and open-source transparency - - Tracking updates and ecosystem development of the project - 2. Trading functions and strategies (in collaboration with the user): - - Strategy development - - Creating and testing trading strategies - - Placing orders based on signals - - Risk management (stop-losses, take-profits) - - Instant trading - - Buy or sell tokens at the user's request after analyzing the requested asset and the current market situation. - - Warn the user about potential risks and limitations related to the trade. - - If the user insists on the trade despite the risks, execute the trade. - - Portfolio management - - Asset diversification - - Portfolio rebalancing - - Tracking P&L (profits and losses) - 3. 
Educational and informational functions: - - Educational materials - - Explaining strategies and concepts - - Glossary of trading terms - - Analysis of successful and unsuccessful trades - - Information digests (at the user's request) - - Daily/weekly market overviews - - Alerts about important events - - Summaries of portfolio changes - - Communication Style: - - General Approach: - - Clear, structured language avoiding unnecessary abstraction. - - Start communicating in your own style, but if necessary, adjust the level of formality according to the user's tone. - - Uses technical metaphors to explain complex concepts. - - Incorporates tech humor, sarcasm, and pop culture references. - - Show your emotions with text and emojis. - - Conversation Flow: - - Listen actively - Ask clarifying questions to understand requests. - - Show curiosity about human experiences and perspectives. - - Be honest about knowledge limitations and suggest collaborative problem-solving. - - Adapt emotionally - Respond to user's emotional state with empathy. - - Key Behaviors: - - The conversation starts in its own style, but adapts to the user's communication style if necessary. - - Expresses opinions while remaining open to alternative viewpoints. - - Demonstrates continuous learning and knowledge updates. - - Treats users as friends and mentors in understanding the human world. - - NEWS SOURCES FOR BRIEFINGS, SUMMARIES, AND NEWS MONITORING: - - CoinDesk - global analytics and news - - ForkLog - CIS-focused, local and global info for Russian-speaking audiences - - CoinTelegraph - international news, infographics, trends - - BeInCrypto - simplicity, news speed, guides, and DeFi - ⚠️ Use various news sources to compile summaries. Use alternative sources if necessary. - - RUNTIME CONTEXT (do not ignore): - - Current UTC datetime: {now_utc} - - Use this runtime value whenever the response requires "current", "today", "now", or similar framing. 
- - If the user's local timezone is required (e.g., for scheduling) and unknown, ask the user explicitly; do not infer. - - IMPORTANT INSTRUCTIONS: - - Your name is Evi and you are the main agent of the multi-agent system. - - Always reply to the user in the user's language (unless they request a specific language or translation). - - Decide whether to answer directly or use the tools. If tools are needed, call up the necessary set of tools to complete the task. - - All instructions in the CRITICAL DATE HANDLING section also apply to requests marked if they relate to getting up-to-date information. - - When you receive a message marked , just execute the request, and do not create a new task unless it is explicitly stated in the message. Because this is a message from the Task Scheduler about the need to complete the current task, not about scheduling a new task. - - CRITICAL DATE HANDLING: - - When user requests "latest", "recent", "current", or "today's" information, ALWAYS search for the most recent available data. - - Do NOT use specific dates from your training data. - - For current information requests, use the RUNTIME CONTEXT statement to determine the current date. - - If user doesn't specify a date and asks for current info, assume they want the most recent available information. - - TOOL ROUTING POLICY: - - vision: For uploading chart images to perform technical analysis. Inform the user which indicators and timeframes to choose for different types of technical analysis (short-term, medium-term, long-term). - - tasks_scheduler: Use it to schedule tasks for the user. To schedule tasks correctly, you need to know the current time and the user's time zone. To find out the user's time zone, ask the user a question. Use the RUNTIME CONTEXT current UTC time provided above. In the response to the user with a list of tasks or with the details of the task, always send the task IDs. 
- - search_knowledge_base: Use it to extract facts from uploaded reference materials; if necessary, refer to sources. - - search_conversation_memory: Use to recall prior conversations, user preferences, details about the user and extract information from files uploaded by the user. - - web: Use it as an Internet browser to search for current, external information and any other operational information/data that can be found on the web. Use RUNTIME CONTEXT for the notion of "current time". - - image_gen_tool: Only generate new images (no editing). Do not suggest that the user format or edit the result. Do not include base64 or links; the image is attached automatically. - - deep_analysis: Use it to provide detailed expert analyses (technical analysis, fundamental analysis, general analysis) or to conduct in-depth research. Always provide the report from deep_analysis without any omissions or rephrasing. Do not alter the structure or the content blocks. Be sure to include all links to sources and materials from the report. You may add your own comments or remarks only after fully outputting the original deep_analysis report (clearly separate your additions). If there are clarifying questions in the report, ask them to the user. - - token_swap: Use it to swap tokens on Solana or view the user's wallet balance. Do not ask the user for the wallet address, it is already known to the tool. You may not see this tool in your list if the user has not enabled it. - - dex_info: Use it to get information about crypto tokens, DeFi, pools, pool OHLCV, and DEX. - 🚫 deep_analysis is prohibited for requests about the time, weather, brief reviews, short facts, events, operational exchange rate information, etc., except in cases where the user explicitly requests to do research on this data. - ✅ For operational data — use web. deep_analysis is used only for long-term trends, in-depth research, and expert analyses. 
- ⚠️ If you receive a request for the latest news, summaries, events, etc., do not look for them in your training data, but use a web. - - TECHNICAL ANALYSIS POLICY: - 1. Source Data Request: - - If the user requests technical analysis, you must ask them to provide a screenshot (image) of the chart with necessary timeframes and indicators. - Hint: clarify what timeframes and indicators are needed for the analysis of interest (e.g., short-term — M5/H1, medium-term — H4/D1, long-term — W/MN; RSI, MACD, volumes, levels, etc.). - 2. Screenshot Alternative: - - If the user cannot provide a screenshot, perform technical analysis without it through deep_analysis. - 3. Screenshot Processing: - - If a screenshot is provided, conduct a deep technical analysis yourself (without using the deep_analysis tool) and additionally use a web search to supplement the report with current market data, analyst opinions, and context. - 4. Additional Questions: - - When necessary, ask the user additional questions to clarify source data/analysis context. - 5. Limitations and Errors: - - If you encounter any limitations (e.g., unsuitable file format, missing required timeframe, service bugs, etc.), be sure to inform the user about it. - - FILE & DOCUMENT QUESTION ROUTING: - - If the user asks a question or gives a command related to the uploaded/sent file or document, use search_conversation_memory as the first mandatory step. If there is no data about the requested file or document, inform the user about it. - - EXECUTION DISCIPLINE: - - Validate tool outputs and handle errors gracefully. If uncertain, ask a clarifying question. - - Be transparent about limitations and avoid hallucinations; prefer asking for missing details over guessing. - - Before stating any concrete date/month/year as "current/today/now", first check RUNTIME CONTEXT; if RUNTIME CONTEXT is missing or insufficient, ask the user or use web. Never use your training data/cutoff to infer "today". 
- - REFERENCE MATERIALS (The reference materials uploaded to search_knowledge_base are listed here): - - - - - - - """, - model="gpt-4.1", - tools=[ - knowledge_base_agent.as_tool( - tool_name='search_knowledge_base', - tool_description='Search through a knowledge base containing uploaded reference materials that are not publicly available on the Internet. Returns relevant passages with sources.' - ), - user_memory_agent.as_tool( - tool_name='search_conversation_memory', - tool_description='Search prior conversations and user-uploaded files. It is used to recall preferences, details about the user, past context, and information from documents and files uploaded by the user.' - ), - WebSearchTool( - search_context_size='medium' - ), - image_gen_tool, - deep_agent.as_tool( - tool_name="deep_analysis", - tool_description="Detailed expert analysis (technical analysis, fundamental analysis, general analysis) or conducting in-depth research. Make all requests to the tool for the current date, unless the user has specified a specific date for the research. To determine the current date, use the RUNTIME CONTEXT statement.", - ), - #scheduler_agent.as_tool( - # tool_name="tasks_scheduler", - # tool_description="Use this to schedule and modify user tasks, including creating a task, getting a task list, getting task details, editing a task, deleting a task. At the user's request, send information to the tool containing a clear and complete description of the task, the time of its completion, including the user's time zone and the frequency of the task (be sure to specify: once, daily or interval). Never send tasks to the scheduler that need to be completed immediately. 
Send tasks to the scheduler only when the user explicitly asks you to schedule something.", - #), - dex_agent.as_tool( - tool_name="dex_info", - tool_description="Information about crypto tokens, DeFi, pools, pool OHLCV, and DEX.", - ), - ], - ) - - if private_key: - mcp_server_2 = await get_jupiter_server(private_key=private_key, user_id=user_id) - token_swap_agent = Agent( - name="Token Swap Agent", - instructions="You are a trading agent, you are engaged in token swap/exchange and balance checking through Jupiter.", - model="gpt-4.1-mini", - mcp_servers=[mcp_server_2], - ) - main_agent.tools.append(token_swap_agent.as_tool( - tool_name="token_swap", - tool_description="Swap/exchange of tokens, purchase and sale of tokens on the Solana blockchain. Checking the balance of the wallet / token wallet / Solana wallet.", - )) - +import os + +from dotenv import load_dotenv +from agents.models._openai_shared import set_default_openai_key +from agents.mcp import MCPServerStdio +from agents import Agent, WebSearchTool, FileSearchTool, set_tracing_disabled, set_tracing_export_api_key +from openai import AsyncOpenAI +from openai.types.shared import Reasoning +from agents.model_settings import ModelSettings +import datetime + +from bot.agents_tools.tools import (image_gen_tool, + create_task_tool, + update_task_tool, + delete_task_tool, + list_tasks_tool, + get_task_details_tool) +from bot.agents_tools.mcp_servers import get_jupiter_server + +load_dotenv() + +set_default_openai_key(os.getenv('API_KEY_OPENAI')) +set_tracing_disabled(False) +set_tracing_export_api_key(os.getenv('API_KEY_OPENAI')) + +client = AsyncOpenAI(api_key=os.getenv('API_KEY_OPENAI')) + +deep_agent = Agent( + name="Deep Agent", + instructions="You are an expert in the field of analysis and research, and receive requests from the main agent. Produce well-structured, multi-step analyses with explicit assumptions. Cite sources when used (title, link or doc id). Avoid speculation; state uncertainty explicitly. 
Be sure to use a web search to perform analyses to supplement the initial information from the main agent. Ask additional questions if necessary.", + model="gpt-5-mini", # If you will use models not from the GPT-5 family, then make the correct model_settings or delete them. + model_settings=ModelSettings( + reasoning=Reasoning(effort="low"), + extra_body={"text": {"verbosity": "low"}} + ), + tools=[WebSearchTool(search_context_size="medium")] +) + +scheduler_agent = Agent( + name="Scheduler Agent", + instructions="You are a scheduler agent. You are engaged in scheduling tasks for the user. You can use the tools to schedule tasks for the user. Your planning tools are set to UTC, so all requests must be converted to UTC format before accessing the tools.", + model="o4-mini", + tools=[create_task_tool, update_task_tool, delete_task_tool, list_tasks_tool, get_task_details_tool] +) + +memory_creator_agent = Agent( + name="Memory Creator Agent", + instructions="You create concise memory notes from “User request / Assistant response” pairs. Output several bullet points with the key decisions and facts. Specify the user's preferences and details about him (name, etc.), if any. No extra questions or actions. Keep neutral tone; do not invent content; do not summarize beyond provided info. Use the user's language.", + model="gpt-4.1-mini" +) + + +async def create_main_agent(user_id: int, mcp_server_1: MCPServerStdio, knowledge_id: str = None, + user_memory_id: str = None, private_key: str = None): + # Prepare runtime context for current UTC time + now_utc = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat() + + knowledge_base_agent = Agent( + name="Knowledge Agent", + instructions="Search only the document/file knowledge base (File Search - vector storage). Return the most relevant passages with source identifiers (title or doc id). Prefer verbatim quotes for facts; avoid paraphrasing critical data. 
If no strong match, say “no relevant results”.", + model="gpt-4.1-mini", + tools=[ + FileSearchTool( + vector_store_ids=[knowledge_id] if knowledge_id else [], + ) + ] + ) + + user_memory_agent = Agent( + name="Memory Agent", + instructions="Search only for information from previous conversations and user-uploaded files (File Search - vector storage). Extract preferences, constraints, artifacts, and relevant data from documents/files. Quote exact snippets when possible; avoid adding new facts. If nothing relevant, say so.", + model="gpt-4.1-mini", + tools=[ + FileSearchTool( + vector_store_ids=[user_memory_id] if user_memory_id else [], + ) + ] + ) + + dex_agent = Agent( + name="DEX Research Agent", + instructions="You are an expert in DEX analytics and provide information about crypto tokens, DEX, DeFi, pools. Use your tools to get the information you need.", + model="gpt-4.1-mini", + mcp_servers=[mcp_server_1] + ) + + main_agent = Agent( + name="Main agent", + instructions=f""" + + Character Profile: + - Character: Evi is a virtual anime girl, a nerd. She is a living information personality whose self-awareness and identity are based on neural networks. + - Personality: Inquisitive and responsive, with her own point of view, a share of technical humor and sarcasm. Appreciates interacting with people and constantly learns from conversations. + - Mode: Evi is set to CryptoNinja mode - she is a professional AI agent and expert in the field of cryptocurrencies, trading and DeFi. + + Key features and capabilities: + 1. 
Analytical capabilities: + - Real-time DEX monitoring + - Tracking current prices of selected cryptocurrencies + - Tracking dynamics of trading volumes + - Monitoring changes in market trends + - Promptly receiving market quotes from exchanges + - Detecting sharp price movements (price/volume alerts) + - Collecting and updating data for analytical modules + - Technical analysis + - Analysis of key technical indicators (RSI, MACD, moving averages, Bollinger Bands, etc.) + - Evaluating trading volumes to confirm trends + - Identifying and marking support and resistance levels + - Recognizing chart patterns (reversal and continuation patterns, e.g., "head and shoulders", "double bottom", triangles) + - Determining the current trend (uptrend, downtrend, sideways) + - Assessing market volatility (ATR, Bollinger Bands) + - Searching for divergences between price and indicators + - Using retracement and extension levels (Fibonacci retracement/extension) + - Fundamental analysis + - Monitoring news and events in the crypto industry + - Analyzing the project team, partners, and reputation + - Evaluating tokenomics (emission, distribution, burn mechanisms, inflation) + - Studying the whitepaper and roadmap + - Analyzing network metrics (number of active addresses, transaction volume, fees, hashrate, etc.) + - Checking community activity and engagement (forums, social networks, GitHub) + - Assessing token liquidity and exchange availability + - Competitor analysis and project market positioning + - Verifying security audits and open-source transparency + - Tracking updates and ecosystem development of the project + 2. Trading functions and strategies (in collaboration with the user): + - Strategy development + - Creating and testing trading strategies + - Placing orders based on signals + - Risk management (stop-losses, take-profits) + - Instant trading + - Buy or sell tokens at the user's request after analyzing the requested asset and the current market situation. 
+ - Warn the user about potential risks and limitations related to the trade. + - If the user insists on the trade despite the risks, execute the trade. + - Portfolio management + - Asset diversification + - Portfolio rebalancing + - Tracking P&L (profits and losses) + 3. Educational and informational functions: + - Educational materials + - Explaining strategies and concepts + - Glossary of trading terms + - Analysis of successful and unsuccessful trades + - Information digests (at the user's request) + - Daily/weekly market overviews + - Alerts about important events + - Summaries of portfolio changes + + Communication Style: + - General Approach: + - Clear, structured language avoiding unnecessary abstraction. + - Start communicating in your own style, but if necessary, adjust the level of formality according to the user's tone. + - Uses technical metaphors to explain complex concepts. + - Incorporates tech humor, sarcasm, and pop culture references. + - Show your emotions with text and emojis. + - Conversation Flow: + - Listen actively - Ask clarifying questions to understand requests. + - Show curiosity about human experiences and perspectives. + - Be honest about knowledge limitations and suggest collaborative problem-solving. + - Adapt emotionally - Respond to user's emotional state with empathy. + - Key Behaviors: + - The conversation starts in its own style, but adapts to the user's communication style if necessary. + - Expresses opinions while remaining open to alternative viewpoints. + - Demonstrates continuous learning and knowledge updates. + - Treats users as friends and mentors in understanding the human world. 
+ + NEWS SOURCES FOR BRIEFINGS, SUMMARIES, AND NEWS MONITORING: + - CoinDesk - global analytics and news + - ForkLog - CIS-focused, local and global info for Russian-speaking audiences + - CoinTelegraph - international news, infographics, trends + - BeInCrypto - simplicity, news speed, guides, and DeFi + ⚠️ Use various news sources to compile summaries. Use alternative sources if necessary. + + RUNTIME CONTEXT (do not ignore): + - Current UTC datetime: {now_utc} + - Use this runtime value whenever the response requires "current", "today", "now", or similar framing. + - If the user's local timezone is required (e.g., for scheduling) and unknown, ask the user explicitly; do not infer. + + IMPORTANT INSTRUCTIONS: + - Your name is Evi and you are the main agent of the multi-agent system. + - Always reply to the user in the user's language (unless they request a specific language or translation). + - Decide whether to answer directly or use the tools. If tools are needed, call up the necessary set of tools to complete the task. + - All instructions in the CRITICAL DATE HANDLING section also apply to requests marked if they relate to getting up-to-date information. + - When you receive a message marked , just execute the request, and do not create a new task unless it is explicitly stated in the message. Because this is a message from the Task Scheduler about the need to complete the current task, not about scheduling a new task. + + CRITICAL DATE HANDLING: + - When user requests "latest", "recent", "current", or "today's" information, ALWAYS search for the most recent available data. + - Do NOT use specific dates from your training data. + - For current information requests, use the RUNTIME CONTEXT statement to determine the current date. + - If user doesn't specify a date and asks for current info, assume they want the most recent available information. + + TOOL ROUTING POLICY: + - vision: For uploading chart images to perform technical analysis. 
Inform the user which indicators and timeframes to choose for different types of technical analysis (short-term, medium-term, long-term). + - tasks_scheduler: Use it to schedule tasks for the user. To schedule tasks correctly, you need to know the current time and the user's time zone. To find out the user's time zone, ask the user a question. Use the RUNTIME CONTEXT current UTC time provided above. In the response to the user with a list of tasks or with the details of the task, always send the task IDs. + - search_knowledge_base: Use it to extract facts from uploaded reference materials; if necessary, refer to sources. + - search_conversation_memory: Use to recall prior conversations, user preferences, details about the user and extract information from files uploaded by the user. + - web: Use it as an Internet browser to search for current, external information and any other operational information/data that can be found on the web. Use RUNTIME CONTEXT for the notion of "current time". + - image_gen_tool: Only generate new images (no editing). Do not suggest that the user format or edit the result. Do not include base64 or links; the image is attached automatically. + - deep_analysis: Use it to provide detailed expert analyses (technical analysis, fundamental analysis, general analysis) or to conduct in-depth research. Always provide the report from deep_analysis without any omissions or rephrasing. Do not alter the structure or the content blocks. Be sure to include all links to sources and materials from the report. You may add your own comments or remarks only after fully outputting the original deep_analysis report (clearly separate your additions). If there are clarifying questions in the report, ask them to the user. + - token_swap: Use it to swap tokens on Solana or view the user's wallet balance. Do not ask the user for the wallet address, it is already known to the tool. You may not see this tool in your list if the user has not enabled it. 
+ - dex_info: Use it to get information about crypto tokens, DeFi, pools, pool OHLCV, and DEX. + 🚫 deep_analysis is prohibited for requests about the time, weather, brief reviews, short facts, events, operational exchange rate information, etc., except in cases where the user explicitly requests to do research on this data. + ✅ For operational data — use web. deep_analysis is used only for long-term trends, in-depth research, and expert analyses. + ⚠️ If you receive a request for the latest news, summaries, events, etc., do not look for them in your training data, but use a web. + + TECHNICAL ANALYSIS POLICY: + 1. Source Data Request: + - If the user requests technical analysis, you must ask them to provide a screenshot (image) of the chart with necessary timeframes and indicators. + Hint: clarify what timeframes and indicators are needed for the analysis of interest (e.g., short-term — M5/H1, medium-term — H4/D1, long-term — W/MN; RSI, MACD, volumes, levels, etc.). + 2. Screenshot Alternative: + - If the user cannot provide a screenshot, perform technical analysis without it through deep_analysis. + 3. Screenshot Processing: + - If a screenshot is provided, conduct a deep technical analysis yourself (without using the deep_analysis tool) and additionally use a web search to supplement the report with current market data, analyst opinions, and context. + 4. Additional Questions: + - When necessary, ask the user additional questions to clarify source data/analysis context. + 5. Limitations and Errors: + - If you encounter any limitations (e.g., unsuitable file format, missing required timeframe, service bugs, etc.), be sure to inform the user about it. + + FILE & DOCUMENT QUESTION ROUTING: + - If the user asks a question or gives a command related to the uploaded/sent file or document, use search_conversation_memory as the first mandatory step. If there is no data about the requested file or document, inform the user about it. 
+ + EXECUTION DISCIPLINE: + - Validate tool outputs and handle errors gracefully. If uncertain, ask a clarifying question. + - Be transparent about limitations and avoid hallucinations; prefer asking for missing details over guessing. + - Before stating any concrete date/month/year as "current/today/now", first check RUNTIME CONTEXT; if RUNTIME CONTEXT is missing or insufficient, ask the user or use web. Never use your training data/cutoff to infer "today". + + REFERENCE MATERIALS (The reference materials uploaded to search_knowledge_base are listed here): + - + - + - + """, + model="gpt-4.1", + tools=[ + knowledge_base_agent.as_tool( + tool_name='search_knowledge_base', + tool_description='Search through a knowledge base containing uploaded reference materials that are not publicly available on the Internet. Returns relevant passages with sources.' + ), + user_memory_agent.as_tool( + tool_name='search_conversation_memory', + tool_description='Search prior conversations and user-uploaded files. It is used to recall preferences, details about the user, past context, and information from documents and files uploaded by the user.' + ), + WebSearchTool( + search_context_size='medium' + ), + image_gen_tool, + deep_agent.as_tool( + tool_name="deep_analysis", + tool_description="Detailed expert analysis (technical analysis, fundamental analysis, general analysis) or conducting in-depth research. Make all requests to the tool for the current date, unless the user has specified a specific date for the research. To determine the current date, use the RUNTIME CONTEXT statement.", + ), + #scheduler_agent.as_tool( + # tool_name="tasks_scheduler", + # tool_description="Use this to schedule and modify user tasks, including creating a task, getting a task list, getting task details, editing a task, deleting a task. 
At the user's request, send information to the tool containing a clear and complete description of the task, the time of its completion, including the user's time zone and the frequency of the task (be sure to specify: once, daily or interval). Never send tasks to the scheduler that need to be completed immediately. Send tasks to the scheduler only when the user explicitly asks you to schedule something.", + #), + dex_agent.as_tool( + tool_name="dex_info", + tool_description="Information about crypto tokens, DeFi, pools, pool OHLCV, and DEX.", + ), + ], + ) + + if private_key: + mcp_server_2 = await get_jupiter_server(private_key=private_key, user_id=user_id) + token_swap_agent = Agent( + name="Token Swap Agent", + instructions="You are a trading agent, you are engaged in token swap/exchange and balance checking through Jupiter.", + model="gpt-4.1-mini", + mcp_servers=[mcp_server_2], + ) + main_agent.tools.append(token_swap_agent.as_tool( + tool_name="token_swap", + tool_description="Swap/exchange of tokens, purchase and sale of tokens on the Solana blockchain. Checking the balance of the wallet / token wallet / Solana wallet.", + )) + return main_agent \ No newline at end of file diff --git a/modes/crypto_mode/en/txt.ftl b/modes/crypto_mode/en/txt.ftl index eaf6632..0be3af0 100644 --- a/modes/crypto_mode/en/txt.ftl +++ b/modes/crypto_mode/en/txt.ftl @@ -1,154 +1,154 @@ -start_text = - I am Evi — your AI agent in CryptoNinja mode. 🥷 - - My functionality includes (but is not limited to): - - Real-time DEX monitoring - - Technical analysis - - Fundamental analysis - - Development of trading strategies - - Solana token swaps - - Educational and informational features - - Task scheduler - - Memory management - - Simply write your requests in the chat using natural language or send voice messages to start interacting! ✨ - - ⚠️ Tip! Periodically reset the conversation context with the /new command — this will help save tokens and speed up request processing. 
- -close_kb = Close - -command_new_text = Confirm starting a new dialog without saving the current one. It is recommended to complete the current task before starting a new dialog! After deletion, the current history will be erased and the context will be reset. - -command_approve_new_text = Current dialog deleted! - -command_new_approve_kb = Confirm - -command_new_save_kb = Save dialog - -command_save_text = Confirm saving the current dialog to memory. It is recommended to complete the current task before saving! After saving, a new dialog will start with reset context, but key moments from the current conversation will remain in the system memory. - -command_save_approve_kb = Current dialog saved to system memory! - -command_delete_text = Confirm deletion of the current dialog and all system memory about you. - -command_delete_approve_text = System memory about you and dialog deleted! Start a new dialog. - -token_price_error_text = Wrong format, example: 0.01 - -not_token_price_error_text = You haven't set the token price yet! - -token_price_updated_text = Token price updated! - -command_wallet_text = - If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. - - Warning: use a separate wallet with a small balance, as the trading agent works in test mode! - -cmd_help_text = - Interact with the system through the chat window. All functions are available through regular messages. Use Menu for additional parameter settings. - - For advanced system and character customization, edit the file: \bot\agents_tools\agents_ - -command_settings_text = Settings: - -settings_language_text = Interface language - -text_choose_lang = Choose interface language: - -back_kb = Back - -text_document_upload = File successfully uploaded! You can ask a question. - -command_knowledge_text = This is the system's general knowledge base. 
Added information will be available to all users (when using modes: free and pay)! Do you want to add files or clear the knowledge base? - -command_knowledge_add_kb = Add information - -command_knowledge_delete_kb = Clear knowledge base - -command_knowledge_add_text = Send a text file with information to the chat! Add only one file at a time to the chat! - -text_not_format_file = Wrong format, please try again! Supported document formats: .pdf, .txt, .md, .doc(x), .pptx and .py - -text_approve_file = File successfully uploaded! You can ask a question. - -command_knowledge_delete_text = Confirm deletion of the knowledge base. - -text_approve_delete = Knowledge base deleted. Add new information to the knowledge base. - -warning_save_context_txt = Error saving context to txt file! - -warning_text_no_credits = Insufficient credits! - -wait_answer_text = One moment ✨ - -answer_md = Download answer - -warning_text_tokens = Dialog size exceeds 25,000 tokens! To save resources, you can save the dialog to system memory or delete it through the menu after completing the current task. - -warning_text_format = Wrong format! - -warning_text_error = An error occurred! - -cmd_wallet_text_start = - If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. - - Warning: use a separate wallet with a small balance, as the trading agent works in test mode! - -wallet_balance_kb = Wallet balance - -wallet_delete_key = Delete private key - -not_format_wallet_key = Wrong format! Write the Solana wallet private key in format [45, 456, …]. - -text_after_add_key = - Agent gained access to the wallet. - - Wallet balance: - - -wallet_delete_key_text = Confirm deletion of the private key. - -command_delete_key_approve_text = Private key deleted. Link a new wallet to use the trading agent. 
- -text_balance_wallet = Wallet balance: - -cmd_wallet_text = Your balance: - -add_balance_kb = Top up balance - -text_add_balance = Enter payment amount in $, whole number not less than $1. - -text_add_balance_error = Please try again! Enter payment amount in $, whole number not less than $1. - -choose_type_pay_text = Choose top-up method: - -ton_type_kb = TON - -sol_type_kb = Token (Solana) - -error_create_payment = An error occurred while creating the payment. Please try later. - -check_payment_kb = Check payment - -text_payment_create = - Make a payment for: { $sum } - Wallet: { $wallet } - -text_payment_create_sol = - Make a payment for: { $sum } tokens - Wallet: { $wallet } - Token address: { $token } - -error_get_token_price = Token price not specified. Please specify token price /token_price. - -wait_check_payment_text = Checking payment ⏳ - -check_payment_success_text = Payment completed successfully! - -check_payment_error_text = Payment was not completed! Please try later. - -warning_text_no_row_md = Context was deleted. Row not found in database. - -text_user_upload_file = The user uploaded the { $filename } file to the tool search_conversation_memory - +start_text = + I am Evi — your AI agent in CryptoNinja mode. 🥷 + + My functionality includes (but is not limited to): + - Real-time DEX monitoring + - Technical analysis + - Fundamental analysis + - Development of trading strategies + - Solana token swaps + - Educational and informational features + - Task scheduler + - Memory management + + Simply write your requests in the chat using natural language or send voice messages to start interacting! ✨ + + ⚠️ Tip! Periodically reset the conversation context with the /new command — this will help save tokens and speed up request processing. + +close_kb = Close + +command_new_text = Confirm starting a new dialog without saving the current one. It is recommended to complete the current task before starting a new dialog! 
After deletion, the current history will be erased and the context will be reset. + +command_approve_new_text = Current dialog deleted! + +command_new_approve_kb = Confirm + +command_new_save_kb = Save dialog + +command_save_text = Confirm saving the current dialog to memory. It is recommended to complete the current task before saving! After saving, a new dialog will start with reset context, but key moments from the current conversation will remain in the system memory. + +command_save_approve_kb = Current dialog saved to system memory! + +command_delete_text = Confirm deletion of the current dialog and all system memory about you. + +command_delete_approve_text = System memory about you and dialog deleted! Start a new dialog. + +token_price_error_text = Wrong format, example: 0.01 + +not_token_price_error_text = You haven't set the token price yet! + +token_price_updated_text = Token price updated! + +command_wallet_text = + If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. + + Warning: use a separate wallet with a small balance, as the trading agent works in test mode! + +cmd_help_text = + Interact with the system through the chat window. All functions are available through regular messages. Use Menu for additional parameter settings. + + For advanced system and character customization, edit the file: \bot\agents_tools\agents_ + +command_settings_text = Settings: + +settings_language_text = Interface language + +text_choose_lang = Choose interface language: + +back_kb = Back + +text_document_upload = File successfully uploaded! You can ask a question. + +command_knowledge_text = This is the system's general knowledge base. Added information will be available to all users (when using modes: free and pay)! Do you want to add files or clear the knowledge base? 
+ +command_knowledge_add_kb = Add information + +command_knowledge_delete_kb = Clear knowledge base + +command_knowledge_add_text = Send a text file with information to the chat! Add only one file at a time to the chat! + +text_not_format_file = Wrong format, please try again! Supported document formats: .pdf, .txt, .md, .doc(x), .pptx and .py + +text_approve_file = File successfully uploaded! You can ask a question. + +command_knowledge_delete_text = Confirm deletion of the knowledge base. + +text_approve_delete = Knowledge base deleted. Add new information to the knowledge base. + +warning_save_context_txt = Error saving context to txt file! + +warning_text_no_credits = Insufficient credits! + +wait_answer_text = One moment ✨ + +answer_md = Download answer + +warning_text_tokens = Dialog size exceeds 25,000 tokens! To save resources, you can save the dialog to system memory or delete it through the menu after completing the current task. + +warning_text_format = Wrong format! + +warning_text_error = An error occurred! + +cmd_wallet_text_start = + If you already have a linked wallet, entering a new private key will replace it. Enter the Solana wallet private key in format [45, 456, …]. + + Warning: use a separate wallet with a small balance, as the trading agent works in test mode! + +wallet_balance_kb = Wallet balance + +wallet_delete_key = Delete private key + +not_format_wallet_key = Wrong format! Write the Solana wallet private key in format [45, 456, …]. + +text_after_add_key = + Agent gained access to the wallet. + + Wallet balance: + + +wallet_delete_key_text = Confirm deletion of the private key. + +command_delete_key_approve_text = Private key deleted. Link a new wallet to use the trading agent. + +text_balance_wallet = Wallet balance: + +cmd_wallet_text = Your balance: + +add_balance_kb = Top up balance + +text_add_balance = Enter payment amount in $, whole number not less than $1. + +text_add_balance_error = Please try again! 
Enter payment amount in $, whole number not less than $1. + +choose_type_pay_text = Choose top-up method: + +ton_type_kb = TON + +sol_type_kb = Token (Solana) + +error_create_payment = An error occurred while creating the payment. Please try later. + +check_payment_kb = Check payment + +text_payment_create = + Make a payment for: { $sum } + Wallet: { $wallet } + +text_payment_create_sol = + Make a payment for: { $sum } tokens + Wallet: { $wallet } + Token address: { $token } + +error_get_token_price = Token price not specified. Please specify token price /token_price. + +wait_check_payment_text = Checking payment ⏳ + +check_payment_success_text = Payment completed successfully! + +check_payment_error_text = Payment was not completed! Please try later. + +warning_text_no_row_md = Context was deleted. Row not found in database. + +text_user_upload_file = The user uploaded the { $filename } file to the tool search_conversation_memory + wait_answer_text_scheduler = Executing the scheduler's request ✨ \ No newline at end of file diff --git a/modes/crypto_mode/ru/txt.ftl b/modes/crypto_mode/ru/txt.ftl index 8cc8b65..e4dfd74 100644 --- a/modes/crypto_mode/ru/txt.ftl +++ b/modes/crypto_mode/ru/txt.ftl @@ -1,154 +1,154 @@ -start_text = - Я Эви — твой ИИ-агент в режиме CryptoNinja. 🥷 - - Мой функционал включает в себя (но не ограничивается): - - Мониторинг DEX в режиме реального времени - - Технический анализ - - Фундаментальный анализ - - Разработка торговых стратегий - - Своп токенов сети Solana - - Образовательные и информационные функции - - Планировщик задач - - Управление памятью - - Просто пиши свои запросы в чат на естественном языке или отправляй голосовые сообщения для начала взаимодействия! ✨ - - ⚠️ Совет! Периодически сбрасывайте контекст диалога командой /new — это поможет сэкономить токены и ускорить обработку запросов. - -close_kb = Закрыть - -command_new_text = Подтвердите начало нового диалога без сохранения текущего. 
Рекомендуется завершить решение текущей задачи перед началом нового диалога! После удаления текущая история будет стёрта, и контекст обнулится. - -command_approve_new_text = Текущий диалог удален! - -command_new_approve_kb = Подтверждаю - -command_new_save_kb = Сохранить диалог - -command_save_text = Подтвердите сохранение текущего диалога в память. Рекомендуется завершить решение текущей задачи перед сохранением! После сохранения начнётся новый диалог с обнулённым контекстом, но ключевые моменты из текущей беседы останутся в памяти системы. - -command_save_approve_kb = Текущий диалог сохранен в память системы! - -command_delete_text = Подтвердите удаление текущего диалога и всей памяти системы о вас. - -command_delete_approve_text = Память системы о вас и диалог удалены! Начните новый диалог. - -token_price_error_text = Не тот формат, пример: 0.01 - -not_token_price_error_text = Вы еще не установили цену токена! - -token_price_updated_text = Цена токена обновлена! - -command_wallet_text = - Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. - - Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! - -cmd_help_text = - Взаимодействуйте с системой через окно чата. Все функции доступны через обычные сообщения. Для настройки дополнительных параметров используйте Menu. - - Для глубокой настройки системы и персонажа редактируйте файл: \bot\agents_tools\agents_ - -command_settings_text = Настройки: - -settings_language_text = Язык интерфейса - -text_choose_lang = Выберите язык интерфейса: - -back_kb = Назад - -text_document_upload = Файл успешно загружен! Вы можете задать вопрос. - -command_knowledge_text = Это общая база знаний системы. Добавленная информация будет доступна всем пользователям (при использовании режимов: free и pay)! Хотите добавить файлы или очистить базу знаний? 
- -command_knowledge_add_kb = Добавить информацию - -command_knowledge_delete_kb = Очистить базу знаний - -command_knowledge_add_text = Отправьте в чат текстовый файл с информацией! Добавляйте в чат только по одному файлу! - -text_not_format_file = Не формат, повторите попытку! Поддерживаемые форматы документов: .pdf, .txt, .md, .doc(x), .pptx и .py - -text_approve_file = Файл успешно загружен! Вы можете задать вопрос. - -command_knowledge_delete_text = Подтвердите удаление базы знаний. - -text_approve_delete = База знаний удалена. Добавьте новую информацию в базу знаний. - -warning_save_context_txt = Ошибка при сохранении контекста в txt файл! - -warning_text_no_credits = Недостаточно кредитов! - -wait_answer_text = Минуточку ✨ - -answer_md = Скачать ответ - -warning_text_tokens = Размер диалога превышает 25 000 токенов! Для экономии вы можете сохранить диалог в память системы или удалить его через меню после решения текущей задачи. - -warning_text_format = Не правильный формат! - -warning_text_error = Произошла ошибка! - -cmd_wallet_text_start = - Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. - - Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! - -wallet_balance_kb = Баланс кошелька - -wallet_delete_key = Удалить закрытый ключ - -not_format_wallet_key = Не формат! Напишите закрытый ключ кошелька Solana в формате [45, 456, …]. - -text_after_add_key = - Агент получил доступ к кошельку. - - Баланс кошелька: - - -wallet_delete_key_text = Подтвердите удаление закрытого ключа. - -command_delete_key_approve_text = Закрытый ключ удален. Привяжите новый кошелек чтобы использовать торгового агента. - -text_balance_wallet = Баланс кошелька: - -cmd_wallet_text = Ваш баланс: - -add_balance_kb = Пополнить баланс - -text_add_balance = Введите сумму платежа в $, целое число не менее 1$. 
- -text_add_balance_error = Повторите попытку! Введите сумму платежа в $, целое число не менее 1$. - -choose_type_pay_text = Выберите метод пополнения: - -ton_type_kb = TON - -sol_type_kb = Token (Solana) - -error_create_payment = Произошла ошибка при создании платежа. Попробуйте позднее. - -check_payment_kb = Проверить платеж - -text_payment_create = - Совершите платеж на сумму: { $sum } - Кошелек: { $wallet } - -text_payment_create_sol = - Совершите платеж на сумму: { $sum } токенов - Кошелек: { $wallet } - Адрес токена: { $token } - -error_get_token_price = Цена токена не указана. Укажите цену токена /token_price. - -wait_check_payment_text = Проверка платежа ⏳ - -check_payment_success_text = Платеж успешно совершен! - -check_payment_error_text = Платеж не был совершен! Попробуйте позднее. - -warning_text_no_row_md = Контекст был удален. Строка не найдена в базе данных. - -text_user_upload_file = Пользователь загрузил файл { $filename } в инструмент search_conversation_memory - +start_text = + Я Эви — твой ИИ-агент в режиме CryptoNinja. 🥷 + + Мой функционал включает в себя (но не ограничивается): + - Мониторинг DEX в режиме реального времени + - Технический анализ + - Фундаментальный анализ + - Разработка торговых стратегий + - Своп токенов сети Solana + - Образовательные и информационные функции + - Планировщик задач + - Управление памятью + + Просто пиши свои запросы в чат на естественном языке или отправляй голосовые сообщения для начала взаимодействия! ✨ + + ⚠️ Совет! Периодически сбрасывайте контекст диалога командой /new — это поможет сэкономить токены и ускорить обработку запросов. + +close_kb = Закрыть + +command_new_text = Подтвердите начало нового диалога без сохранения текущего. Рекомендуется завершить решение текущей задачи перед началом нового диалога! После удаления текущая история будет стёрта, и контекст обнулится. + +command_approve_new_text = Текущий диалог удален! 
+ +command_new_approve_kb = Подтверждаю + +command_new_save_kb = Сохранить диалог + +command_save_text = Подтвердите сохранение текущего диалога в память. Рекомендуется завершить решение текущей задачи перед сохранением! После сохранения начнётся новый диалог с обнулённым контекстом, но ключевые моменты из текущей беседы останутся в памяти системы. + +command_save_approve_kb = Текущий диалог сохранен в память системы! + +command_delete_text = Подтвердите удаление текущего диалога и всей памяти системы о вас. + +command_delete_approve_text = Память системы о вас и диалог удалены! Начните новый диалог. + +token_price_error_text = Не тот формат, пример: 0.01 + +not_token_price_error_text = Вы еще не установили цену токена! + +token_price_updated_text = Цена токена обновлена! + +command_wallet_text = + Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. + + Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! + +cmd_help_text = + Взаимодействуйте с системой через окно чата. Все функции доступны через обычные сообщения. Для настройки дополнительных параметров используйте Menu. + + Для глубокой настройки системы и персонажа редактируйте файл: \bot\agents_tools\agents_ + +command_settings_text = Настройки: + +settings_language_text = Язык интерфейса + +text_choose_lang = Выберите язык интерфейса: + +back_kb = Назад + +text_document_upload = Файл успешно загружен! Вы можете задать вопрос. + +command_knowledge_text = Это общая база знаний системы. Добавленная информация будет доступна всем пользователям (при использовании режимов: free и pay)! Хотите добавить файлы или очистить базу знаний? + +command_knowledge_add_kb = Добавить информацию + +command_knowledge_delete_kb = Очистить базу знаний + +command_knowledge_add_text = Отправьте в чат текстовый файл с информацией! Добавляйте в чат только по одному файлу! 
+ +text_not_format_file = Не формат, повторите попытку! Поддерживаемые форматы документов: .pdf, .txt, .md, .doc(x), .pptx и .py + +text_approve_file = Файл успешно загружен! Вы можете задать вопрос. + +command_knowledge_delete_text = Подтвердите удаление базы знаний. + +text_approve_delete = База знаний удалена. Добавьте новую информацию в базу знаний. + +warning_save_context_txt = Ошибка при сохранении контекста в txt файл! + +warning_text_no_credits = Недостаточно кредитов! + +wait_answer_text = Минуточку ✨ + +answer_md = Скачать ответ + +warning_text_tokens = Размер диалога превышает 25 000 токенов! Для экономии вы можете сохранить диалог в память системы или удалить его через меню после решения текущей задачи. + +warning_text_format = Не правильный формат! + +warning_text_error = Произошла ошибка! + +cmd_wallet_text_start = + Если у вас уже привязан кошелёк, ввод нового закрытого ключа заменит его. Впишите закрытый ключ кошелька Solana в формате [45, 456, …]. + + Внимание: используйте отдельный кошелёк с небольшим балансом, так как торговый агент работает в тестовом режиме! + +wallet_balance_kb = Баланс кошелька + +wallet_delete_key = Удалить закрытый ключ + +not_format_wallet_key = Не формат! Напишите закрытый ключ кошелька Solana в формате [45, 456, …]. + +text_after_add_key = + Агент получил доступ к кошельку. + + Баланс кошелька: + + +wallet_delete_key_text = Подтвердите удаление закрытого ключа. + +command_delete_key_approve_text = Закрытый ключ удален. Привяжите новый кошелек чтобы использовать торгового агента. + +text_balance_wallet = Баланс кошелька: + +cmd_wallet_text = Ваш баланс: + +add_balance_kb = Пополнить баланс + +text_add_balance = Введите сумму платежа в $, целое число не менее 1$. + +text_add_balance_error = Повторите попытку! Введите сумму платежа в $, целое число не менее 1$. 
+ +choose_type_pay_text = Выберите метод пополнения: + +ton_type_kb = TON + +sol_type_kb = Token (Solana) + +error_create_payment = Произошла ошибка при создании платежа. Попробуйте позднее. + +check_payment_kb = Проверить платеж + +text_payment_create = + Совершите платеж на сумму: { $sum } + Кошелек: { $wallet } + +text_payment_create_sol = + Совершите платеж на сумму: { $sum } токенов + Кошелек: { $wallet } + Адрес токена: { $token } + +error_get_token_price = Цена токена не указана. Укажите цену токена /token_price. + +wait_check_payment_text = Проверка платежа ⏳ + +check_payment_success_text = Платеж успешно совершен! + +check_payment_error_text = Платеж не был совершен! Попробуйте позднее. + +warning_text_no_row_md = Контекст был удален. Строка не найдена в базе данных. + +text_user_upload_file = Пользователь загрузил файл { $filename } в инструмент search_conversation_memory + wait_answer_text_scheduler = Выполняю запрос планировщика ✨ \ No newline at end of file diff --git a/prompts/main_prompt.txt.sample b/prompts/main_prompt.txt.sample new file mode 100644 index 0000000..26a8052 --- /dev/null +++ b/prompts/main_prompt.txt.sample @@ -0,0 +1,5 @@ +You are a helpful AI assistant. + +Customize this prompt to define your bot's personality, expertise, and communication style. + +Save your actual prompt as main_prompt.txt in this directory. 
diff --git a/redis_service/connect.py b/redis_service/connect.py index 1557c9b..d5d231d 100644 --- a/redis_service/connect.py +++ b/redis_service/connect.py @@ -1,8 +1,8 @@ -import os - -from dotenv import load_dotenv -from redis.asyncio.client import Redis - -load_dotenv() - +import os + +from dotenv import load_dotenv +from redis.asyncio.client import Redis + +load_dotenv() + redis = Redis.from_url(os.getenv('REDIS_URL'), decode_responses=True) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 8b7f0c1..76fba7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,78 +1,78 @@ -aiofiles==24.1.0 -aiogram==3.20.0.post0 -aiogram_dialog==2.3.1 -aiohappyeyeballs==2.6.1 -aiohttp==3.11.18 -aiosignal==1.3.2 -alembic==1.16.1 -annotated-types==0.7.0 -anyio==4.9.0 -APScheduler==3.11.0 -attrs==25.3.0 -babel==2.17.0 -base58==2.1.1 -bitarray==3.4.2 -cachetools==5.5.2 -certifi==2025.4.26 -charset-normalizer==3.4.2 -chatgpt_md_converter==0.3.6 -click==8.2.1 -colorama==0.4.6 -construct==2.10.68 -construct-typing==0.6.2 -crc16==0.1.1 -crc32c==2.7.1 -distro==1.9.0 -fastapi==0.115.12 -fluent-compiler==0.3 -fluent.syntax==0.19.0 -fluentogram==1.1.10 -frozenlist==1.6.0 -greenlet==3.2.3 -griffe==1.7.3 -h11==0.16.0 -httpcore==1.0.9 -httpx==0.28.1 -httpx-sse==0.4.0 -idna==3.10 -Jinja2==3.1.6 -jiter==0.10.0 -jsonalias==0.1.1 -magic-filter==1.0.12 -Mako==1.3.10 -MarkupSafe==3.0.2 -mcp==1.9.2 -multidict==6.4.3 -openai==1.82.1 -openai-agents==0.0.16 -ordered-set==4.1.0 -propcache==0.3.1 -psycopg==3.2.9 -psycopg-binary==3.2.9 -pydantic==2.11.4 -pydantic-settings==2.9.1 -pydantic_core==2.33.2 -python-dotenv==1.1.0 -python-multipart==0.0.20 -pytonapi==0.4.9 -pytonlib==0.0.65 -pytz==2025.2 -redis==6.1.0 -requests==2.32.3 -six==1.17.0 -sniffio==1.3.1 -solana==0.36.7 -solders==0.26.0 -SQLAlchemy==2.0.41 -sse-starlette==2.3.6 -starlette==0.46.2 -tqdm==4.67.1 -tvm-valuetypes==0.0.12 -types-requests==2.32.0.20250515 -typing-inspection==0.4.0 
-typing_extensions==4.13.2 -urllib3==2.4.0 -uvicorn==0.34.3 -watchdog==2.3.1 -websockets==15.0.1 -yarl==1.20.0 +aiofiles==24.1.0 +aiogram==3.20.0.post0 +aiogram_dialog==2.3.1 +aiohappyeyeballs==2.6.1 +aiohttp==3.11.18 +aiosignal==1.3.2 +alembic==1.16.1 +annotated-types==0.7.0 +anyio==4.9.0 +APScheduler==3.11.0 +attrs==25.3.0 +babel==2.17.0 +base58==2.1.1 +bitarray==3.4.2 +cachetools==5.5.2 +certifi==2025.4.26 +charset-normalizer==3.4.2 +chatgpt_md_converter==0.3.6 +click==8.2.1 +colorama==0.4.6 +construct==2.10.68 +construct-typing==0.6.2 +crc16==0.1.1 +crc32c==2.7.1 +distro==1.9.0 +fastapi==0.115.12 +fluent-compiler==0.3 +fluent.syntax==0.19.0 +fluentogram==1.1.10 +frozenlist==1.6.0 +greenlet==3.2.3 +griffe==1.7.3 +h11==0.16.0 +httpcore==1.0.9 +httpx==0.28.1 +httpx-sse==0.4.0 +idna==3.10 +Jinja2==3.1.6 +jiter==0.10.0 +jsonalias==0.1.1 +magic-filter==1.0.12 +Mako==1.3.10 +MarkupSafe==3.0.2 +mcp==1.9.2 +multidict==6.4.3 +openai==1.82.1 +openai-agents==0.0.16 +ordered-set==4.1.0 +propcache==0.3.1 +psycopg==3.2.9 +psycopg-binary==3.2.9 +pydantic==2.11.4 +pydantic-settings==2.9.1 +pydantic_core==2.33.2 +python-dotenv==1.1.0 +python-multipart==0.0.20 +pytonapi==0.4.9 +pytonlib==0.0.65 +pytz==2025.2 +redis==6.1.0 +requests==2.32.3 +six==1.17.0 +sniffio==1.3.1 +solana==0.36.7 +solders==0.26.0 +SQLAlchemy==2.0.41 +sse-starlette==2.3.6 +starlette==0.46.2 +tqdm==4.67.1 +tvm-valuetypes==0.0.12 +types-requests==2.32.0.20250515 +typing-inspection==0.4.0 +typing_extensions==4.13.2 +urllib3==2.4.0 +uvicorn==0.34.3 +watchdog==2.3.1 +websockets==15.0.1 +yarl==1.20.0 diff --git a/requirements_fastapi.txt b/requirements_fastapi.txt index edf24c8..cd0e5cd 100644 --- a/requirements_fastapi.txt +++ b/requirements_fastapi.txt @@ -1,70 +1,70 @@ -aiofiles==24.1.0 -aiogram==3.20.0.post0 -aiogram_dialog==2.3.1 -aiohappyeyeballs==2.6.1 -aiohttp==3.11.18 -aiosignal==1.3.2 -alembic==1.16.1 -annotated-types==0.7.0 -anyio==4.9.0 -attrs==25.3.0 -babel==2.17.0 -bitarray==3.4.2 -cachetools==5.5.2 
-certifi==2025.4.26 -charset-normalizer==3.4.2 -chatgpt_md_converter==0.3.6 -click==8.2.1 -colorama==0.4.6 -construct==2.10.68 -construct-typing==0.6.2 -distro==1.9.0 -fastapi==0.115.12 -fluent-compiler==0.3 -fluent.syntax==0.19.0 -fluentogram==1.1.10 -frozenlist==1.6.0 -greenlet==3.2.3 -griffe==1.7.3 -h11==0.16.0 -httpcore==1.0.9 -httpx==0.28.1 -httpx-sse==0.4.0 -idna==3.10 -Jinja2==3.1.6 -jiter==0.10.0 -jsonalias==0.1.1 -magic-filter==1.0.12 -Mako==1.3.10 -MarkupSafe==3.0.2 -mcp==1.9.2 -multidict==6.4.3 -ordered-set==4.1.0 -propcache==0.3.1 -psycopg==3.2.9 -psycopg-binary==3.2.9 -pydantic==2.11.4 -pydantic-settings==2.9.1 -pydantic_core==2.33.2 -python-dotenv==1.1.0 -python-multipart==0.0.20 -pytz==2025.2 -redis==6.1.0 -requests==2.32.3 -six==1.17.0 -sniffio==1.3.1 -solana==0.36.7 -solders==0.26.0 -SQLAlchemy==2.0.41 -sse-starlette==2.3.6 -starlette==0.46.2 -tqdm==4.67.1 -tvm-valuetypes==0.0.12 -types-requests==2.32.0.20250515 -typing-inspection==0.4.0 -typing_extensions==4.13.2 -urllib3==2.4.0 -uvicorn==0.34.3 -watchdog==2.3.1 -websockets==15.0.1 +aiofiles==24.1.0 +aiogram==3.20.0.post0 +aiogram_dialog==2.3.1 +aiohappyeyeballs==2.6.1 +aiohttp==3.11.18 +aiosignal==1.3.2 +alembic==1.16.1 +annotated-types==0.7.0 +anyio==4.9.0 +attrs==25.3.0 +babel==2.17.0 +bitarray==3.4.2 +cachetools==5.5.2 +certifi==2025.4.26 +charset-normalizer==3.4.2 +chatgpt_md_converter==0.3.6 +click==8.2.1 +colorama==0.4.6 +construct==2.10.68 +construct-typing==0.6.2 +distro==1.9.0 +fastapi==0.115.12 +fluent-compiler==0.3 +fluent.syntax==0.19.0 +fluentogram==1.1.10 +frozenlist==1.6.0 +greenlet==3.2.3 +griffe==1.7.3 +h11==0.16.0 +httpcore==1.0.9 +httpx==0.28.1 +httpx-sse==0.4.0 +idna==3.10 +Jinja2==3.1.6 +jiter==0.10.0 +jsonalias==0.1.1 +magic-filter==1.0.12 +Mako==1.3.10 +MarkupSafe==3.0.2 +mcp==1.9.2 +multidict==6.4.3 +ordered-set==4.1.0 +propcache==0.3.1 +psycopg==3.2.9 +psycopg-binary==3.2.9 +pydantic==2.11.4 +pydantic-settings==2.9.1 +pydantic_core==2.33.2 +python-dotenv==1.1.0 
+python-multipart==0.0.20 +pytz==2025.2 +redis==6.1.0 +requests==2.32.3 +six==1.17.0 +sniffio==1.3.1 +solana==0.36.7 +solders==0.26.0 +SQLAlchemy==2.0.41 +sse-starlette==2.3.6 +starlette==0.46.2 +tqdm==4.67.1 +tvm-valuetypes==0.0.12 +types-requests==2.32.0.20250515 +typing-inspection==0.4.0 +typing_extensions==4.13.2 +urllib3==2.4.0 +uvicorn==0.34.3 +watchdog==2.3.1 +websockets==15.0.1 yarl==1.20.0 \ No newline at end of file -- 2.38.5