diff --git a/.env b/.env new file mode 100644 index 0000000..d75c6a2 --- /dev/null +++ b/.env @@ -0,0 +1,10 @@ +# Non-secret configuration (committed to git). +# Secrets (TG_BOT_TOKEN, VERCEL_API_KEY, OWNER_TELEGRAM_CHAT_ID) come from Infisical at deploy time. + +DEFAULT_MODEL=anthropic/claude-sonnet-4 +OPENAI_BASE_URL=https://ai-gateway.vercel.sh/v1 +DATA_DIR=/data + +# BetterBot site paths (also set as env vars in compose) +SITE_DIR=/repo/betterlifesg/site +MEMORAIZ_DIR=/repo/memoraiz/frontend diff --git a/.forgejo/workflows/deploy.yml b/.forgejo/workflows/deploy.yml new file mode 100644 index 0000000..7df06a7 --- /dev/null +++ b/.forgejo/workflows/deploy.yml @@ -0,0 +1,116 @@ +name: Deploy BetterBot + +on: + push: + branches: [master] + workflow_dispatch: + +concurrency: + group: deploy-betterbot-${{ forgejo.ref }} + cancel-in-progress: false + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Configure SSH + shell: bash + env: + VPS_SSH_KEY: ${{ secrets.VPS_SSH_KEY }} + run: | + set -euo pipefail + install -d -m 700 ~/.ssh + python3 - <<'PY' + import os + from pathlib import Path + + key = os.environ["VPS_SSH_KEY"] + if "\\n" in key and "\n" not in key: + key = key.replace("\\n", "\n") + key = key.replace("\r\n", "\n").replace("\r", "\n").strip() + "\n" + Path.home().joinpath(".ssh", "id_ed25519").write_text(key, encoding="utf-8") + PY + chmod 600 ~/.ssh/id_ed25519 + ssh-keygen -y -f ~/.ssh/id_ed25519 >/dev/null + ssh-keyscan -H 23.226.133.245 >> ~/.ssh/known_hosts + + - name: Deploy on VPS + shell: bash + run: | + set -euo pipefail + ssh root@23.226.133.245 "cd /opt/src/betterbot && bash scripts/deploy-betterbot.sh" + + notify: + if: ${{ always() }} + needs: [deploy] + runs-on: ubuntu-latest + steps: + - name: Send ntfy notification + shell: bash + env: + JOB_RESULTS: ${{ toJson(needs) }} + NTFY_TOKEN: ${{ secrets.NTFY_TOKEN }} + NTFY_TOPIC_URL: https://ntfy.bytesizeprotip.com/deploy + RUN_URL: ${{ forgejo.server_url }}/${{ forgejo.repository 
}}/actions/runs/${{ forgejo.run_number }} + run: | + set -euo pipefail + + if [ -z "$NTFY_TOKEN" ]; then + echo "NTFY_TOKEN secret not configured, skipping notification." + exit 0 + fi + + eval "$(python3 - <<'PY' + import json + import os + import shlex + + needs = json.loads(os.environ["JOB_RESULTS"]) + results = {name: str(data.get("result") or "unknown") for name, data in needs.items()} + has_failure = any(result == "failure" for result in results.values()) + has_cancelled = any(result == "cancelled" for result in results.values()) + + if has_failure: + status = "failure" + priority = "5" + tags = "rotating_light,x" + elif has_cancelled: + status = "cancelled" + priority = "4" + tags = "warning" + else: + status = "success" + priority = "2" + tags = "white_check_mark" + + summary = ", ".join(f"{name}:{result}" for name, result in results.items()) or "no upstream jobs" + message = "\n".join([ + f"Repo: {os.environ['FORGEJO_REPOSITORY']}", + f"Workflow: {os.environ['FORGEJO_WORKFLOW']}", + f"Status: {status}", + f"Ref: {os.environ.get('FORGEJO_REF_NAME', '')}", + f"Actor: {os.environ.get('FORGEJO_ACTOR', '')}", + f"Run: {os.environ['RUN_URL']}", + f"Jobs: {summary}", + ]) + + values = { + "NTFY_TITLE": f"{os.environ['FORGEJO_WORKFLOW']} {status}", + "NTFY_PRIORITY": priority, + "NTFY_TAGS": tags, + "NTFY_MESSAGE": message, + } + + for key, value in values.items(): + print(f"{key}={shlex.quote(value)}") + PY + )" + + curl --fail --show-error --silent \ + -H "Authorization: Bearer $NTFY_TOKEN" \ + -H "Title: $NTFY_TITLE" \ + -H "Priority: $NTFY_PRIORITY" \ + -H "Tags: $NTFY_TAGS" \ + -H "Click: $RUN_URL" \ + -d "$NTFY_MESSAGE" \ + "$NTFY_TOPIC_URL" \ No newline at end of file diff --git a/.gitignore b/.gitignore index dd7e0d2..05fd344 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ __pycache__/ .venv/ *.env.local +data/ +static/dist/ +frontend/node_modules/ diff --git a/Dockerfile b/Dockerfile index 870fa07..4f468b0 100644 --- a/Dockerfile +++ b/Dockerfile 
@@ -1,7 +1,25 @@ +# ── BetterBot Dockerfile ── +# Fork of CodeAnywhere — Telegram-only, no web UI build stage needed. + FROM python:3.12-slim + +RUN apt-get update \ + && apt-get install -y --no-install-recommends git openssh-client \ + && rm -rf /var/lib/apt/lists/* + WORKDIR /app -RUN apt-get update && apt-get install -y --no-install-recommends git openssh-client && rm -rf /var/lib/apt/lists/* + +# Site project mount points +RUN mkdir -p /site /memoraiz /data + COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt -COPY main.py . -CMD ["python", "main.py"] + +COPY . . +RUN python -m compileall . +RUN chmod +x /app/docker-entrypoint.sh + +EXPOSE 3000 + +ENTRYPOINT ["/app/docker-entrypoint.sh"] +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "3000", "--log-level", "info"] diff --git a/README.md b/README.md index a248227..550bd73 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,43 @@ # BetterBot -A simplified Telegram bot that lets non-technical users edit the Better Life SG website by sending natural language instructions. +A Telegram bot for editing the Better Life SG website and Memoraiz app frontend. +Fork of [CodeAnywhere](https://github.com/andrekamarudin/code_anywhere) — shares +the core Copilot SDK framework and only customises `instance.py` and +`tools/site_editing/`. ## How it works 1. User sends a message to the Telegram bot (e.g. "Change the phone number to 91234567") -2. BetterBot uses an LLM (GPT-4.1) with file-editing tools to read and modify the website HTML -3. Changes are written directly to the site files served by Caddy +2. BetterBot uses the CodeAnywhere Copilot SDK framework with site-editing tools +3. 
Tools list / read / write files and automatically commit + push to git + +## Projects + +| Key | What it manages | Mount path | +|-----|----------------|------------| +| `betterlifesg` | Static HTML site (Tailwind CSS via CDN) | `/repo/betterlifesg/site` | +| `memoraiz` | React 19 + Vite 6 frontend | `/repo/memoraiz/frontend` | ## Stack -- Python 3.12 + python-telegram-bot + OpenAI SDK -- Reads/writes static HTML files mounted from `/opt/betterlifesg/site/` -- Runs on RackNerd at `betterbot.bytesizeprotip.com` +- Python 3.12 + CodeAnywhere framework (Copilot SDK, python-telegram-bot) +- Runs on RackNerd +- Forgejo repo: `andre/betterbot` + +## Fork relationship + +BetterBot shares all framework files with CodeAnywhere. The only +betterbot-specific files are: + +| File | Purpose | +|------|---------| +| `instance.py` | Bot identity, system prompt, feature flags, tool registration | +| `tools/site_editing/` | list_files / read_file / write_file with auto git push | +| `.env` | Non-secret config (model defaults, site paths) | +| `compose/` | Docker compose for RackNerd with site-dir mounts | +| `scripts/deploy-betterbot.sh` | Deploy script targeting RackNerd + Infisical | + +To sync upstream changes, copy updated framework files from `code_anywhere/`. 
## Deployment @@ -24,3 +49,5 @@ ssh racknerd bash /opt/src/betterbot/scripts/deploy-betterbot.sh - `/start` — Introduction and examples - `/reset` — Clear conversation history +- `/model ` — Switch LLM model +- `/current` — Show current model diff --git a/background_tasks.py b/background_tasks.py new file mode 100644 index 0000000..d0aa858 --- /dev/null +++ b/background_tasks.py @@ -0,0 +1,147 @@ +"""Background task manager — spawns long-running Copilot SDK sessions outside the request cycle.""" + +import asyncio +import logging +from dataclasses import dataclass, field +from datetime import datetime, timezone +from typing import Awaitable, Callable + +from config import settings +from copilot_runtime import copilot, stream_session +from llm_costs import extract_usage_and_cost +from model_selection import ModelSelection, build_provider_config, resolve_selection +from ux import extract_final_text + +logger = logging.getLogger(__name__) + +BACKGROUND_TIMEOUT = 600 # 10 minutes + + +@dataclass +class BackgroundTask: + task_id: str + description: str + thread_id: str + started_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) + result: str | None = None + error: str | None = None + usage: dict | None = None + _asyncio_task: asyncio.Task[None] | None = field(default=None, repr=False) + + @property + def done(self) -> bool: + return self._asyncio_task is not None and self._asyncio_task.done() + + @property + def elapsed_seconds(self) -> float: + return (datetime.now(timezone.utc) - self.started_at).total_seconds() + + +class BackgroundTaskManager: + """Tracks background agent tasks per thread. 
One active task per thread.""" + + def __init__(self) -> None: + self._tasks: dict[str, BackgroundTask] = {} + self._thread_tasks: dict[str, list[str]] = {} + self._counter = 0 + + def _next_id(self) -> str: + self._counter += 1 + return f"bg-{self._counter}" + + def start( + self, + *, + thread_id: str, + description: str, + selection: ModelSelection, + system_message: str, + on_complete: Callable[["BackgroundTask"], Awaitable[None]], + ) -> BackgroundTask: + task_id = self._next_id() + task = BackgroundTask(task_id=task_id, description=description, thread_id=thread_id) + + # Background agents use a dedicated model/provider (not the parent session's provider) + bg_selection = resolve_selection(model=settings.BACKGROUND_MODEL) + + bg_system = ( + system_message.rstrip() + "\n\nYou are running as a background agent. " + "Complete the task fully and return your findings. Be thorough." + ) + + async def _run() -> None: + try: + + async def _collect() -> str: + events: list = [] + async for ev in stream_session( + copilot, + model=bg_selection.model, + provider_config=build_provider_config(bg_selection), + system_message=bg_system, + prompt=description, + ): + events.append(ev) + # Extract usage before final text + task.usage = extract_usage_and_cost(bg_selection.model, bg_selection.provider, events) + return extract_final_text(events) or "Task completed but produced no output." 
+ + task.result = await asyncio.wait_for(_collect(), timeout=BACKGROUND_TIMEOUT) + except asyncio.TimeoutError: + task.error = f"Timed out after {BACKGROUND_TIMEOUT}s" + except Exception as exc: + task.error = str(exc) + logger.exception("Background task %s failed", task_id) + finally: + try: + await on_complete(task) + except Exception: + logger.exception("Callback failed for background task %s", task_id) + + task._asyncio_task = asyncio.create_task(_run()) + self._tasks[task_id] = task + self._thread_tasks.setdefault(thread_id, []).append(task_id) + return task + + def get_active(self, thread_id: str) -> BackgroundTask | None: + for task_id in reversed(self._thread_tasks.get(thread_id, [])): + task = self._tasks.get(task_id) + if task and not task.done: + return task + return None + + def get_latest(self, thread_id: str) -> BackgroundTask | None: + ids = self._thread_tasks.get(thread_id, []) + if not ids: + return None + return self._tasks.get(ids[-1]) + + def context_summary(self, thread_id: str) -> str | None: + """Return background-agent context to inject into the system message, or None.""" + active = self.get_active(thread_id) + if active: + return ( + f"A background agent is currently running ({active.elapsed_seconds:.0f}s elapsed).\n" + f"Task: {active.description}\n" + "Its results will be posted to the chat when done." + ) + latest = self.get_latest(thread_id) + if latest is None: + return None + if latest.error: + return f"The last background agent failed: {latest.error}" + if latest.result: + snippet = latest.result[:2000] + ("..." 
if len(latest.result) > 2000 else "") + return f"The last background agent completed.\nTask: {latest.description}\nResult:\n{snippet}" + return None + + def format_status(self, thread_id: str) -> str: + active = self.get_active(thread_id) + if active: + return f"A background agent is running ({active.elapsed_seconds:.0f}s elapsed).\nTask: {active.description}" + latest = self.get_latest(thread_id) + if latest is None: + return "No background agent has run in this thread." + if latest.error: + return f"Last background agent failed: {latest.error}" + return f"Last background agent completed.\nTask: {latest.description}" diff --git a/compose/.env.example b/compose/.env.example index 228c318..9750fcc 100644 --- a/compose/.env.example +++ b/compose/.env.example @@ -1,5 +1,4 @@ +# Secrets from Infisical — used as .env in the compose stack dir TG_BOT_TOKEN=CHANGE_ME -VERCEL_AI_GATEWAY_KEY=CHANGE_ME -OPENAI_BASE_URL=https://ai-gateway.vercel.sh/v1 -MODEL=anthropic/claude-sonnet-4 -ALLOWED_USERS=876499264,417471802 +VERCEL_API_KEY=CHANGE_ME +OWNER_TELEGRAM_CHAT_ID=CHANGE_ME diff --git a/compose/docker-compose.yml b/compose/docker-compose.yml index eea190c..289381e 100644 --- a/compose/docker-compose.yml +++ b/compose/docker-compose.yml @@ -3,14 +3,24 @@ services: build: /opt/src/betterbot container_name: betterbot restart: unless-stopped + env_file: + - defaults.env + - .env volumes: - /opt/src/betterlifesg:/repo/betterlifesg:rw - /opt/src/hk_memoraiz:/repo/memoraiz:rw - /root/.ssh:/root/.ssh:ro - env_file: - - .env + - betterbot-data:/data environment: - TZ=${TZ:-Asia/Singapore} - SITE_DIR=/repo/betterlifesg/site - MEMORAIZ_DIR=/repo/memoraiz/frontend - GIT_SSH_COMMAND=ssh -o StrictHostKeyChecking=no + - GIT_AUTHOR_NAME=BetterBot + - GIT_AUTHOR_EMAIL=betterbot@bytesizeprotip.com + - GIT_COMMITTER_NAME=BetterBot + - GIT_COMMITTER_EMAIL=betterbot@bytesizeprotip.com + +volumes: + betterbot-data: + name: betterbot-data diff --git a/config.py b/config.py new file mode 100644 
index 0000000..4af36c1 --- /dev/null +++ b/config.py @@ -0,0 +1,66 @@ +import os + +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class Settings(BaseSettings): + # LLM + DEFAULT_MODEL: str = "gpt-5.4" + BACKGROUND_MODEL: str = "vercel:google/gemma-4-31b-it" + OPENAI_API_KEY: str = "" + OPENAI_BASE_URL: str = "https://api.openai.com/v1" + OPENROUTER_API_KEY: str = "" + OPENROUTER_BASE_URL: str = "https://openrouter.ai/api/v1" + VERCEL_API_KEY: str = "" + VERCEL_BASE_URL: str = "https://ai-gateway.vercel.sh/v1" + HUGGINGFACE_API_KEY: str = "" + HUGGINGFACE_BASE_URL: str = "https://router.huggingface.co/hf-inference/v1" + GITHUB_TOKEN: str = "" + + # Telegram + TG_BOT_TOKEN: str = "" + OWNER_TELEGRAM_CHAT_ID: str = "" # Telegram user ID of the bot owner for approval requests + ELEVENLABS_API_KEY: str = "" + ELEVENLABS_VOICE_ID: str = "" + ELEVENLABS_MODEL: str = "eleven_multilingual_v2" + + # Auth + AUTH_TOKEN: str = "" + + # Integrations + VIKUNJA_API_URL: str = "" + VIKUNJA_API_KEY: str = "" + VIKUNJA_MEMORY_PATH: str = "" + KARAKEEP_API_URL: str = "" + KARAKEEP_API_KEY: str = "" + + # User identity & provisioning + CREDENTIAL_VAULT_KEY: str = "" + VIKUNJA_ADMIN_API_KEY: str = "" + KARAKEEP_ADMIN_API_KEY: str = "" + ALLOW_CREDENTIAL_REVEAL_IN_CHAT: bool = False + EVENT_POLL_INTERVAL_SECONDS: int = 300 + + # Advisor + ADVISOR_ENABLED: bool = False + ADVISOR_DEFAULT_MODEL: str = "claude-opus-4.6" + ADVISOR_MAX_USES: int = 3 + ADVISOR_MAX_TOKENS: int = 700 + + # Paths + REPOS_DIR: str = "/repos" + DATA_DIR: str = "/data" + TG_PERSISTENCE_DIR: str = "" + + # BetterBot — site directories + SITE_DIR: str = "/site" + MEMORAIZ_DIR: str = "/memoraiz" + + model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore") + + +settings = Settings() +if not settings.VERCEL_API_KEY: + settings.VERCEL_API_KEY = os.getenv("AI_GATEWAY_API_KEY", "") +if not settings.HUGGINGFACE_API_KEY: + settings.HUGGINGFACE_API_KEY = 
os.getenv("HF_ACCESS_TOKEN", "") diff --git a/copilot_runtime.py b/copilot_runtime.py new file mode 100644 index 0000000..8dce14b --- /dev/null +++ b/copilot_runtime.py @@ -0,0 +1,333 @@ +"""Copilot SDK runtime — manages CopilotClient lifecycle and session creation.""" + +import asyncio +import logging +import random +from typing import Any, AsyncIterator + +from copilot import CopilotClient, SubprocessConfig +from copilot.generated.session_events import SessionEventType +from copilot.session import CopilotSession, PermissionHandler, SessionEvent +from copilot.tools import Tool + +from config import settings +from instance import skill_directories as _default_skill_directories + +logger = logging.getLogger(__name__) + +# ── Retry configuration ───────────────────────────────────────────── + +MAX_SESSION_RETRIES = 3 +RETRY_BASE_DELAY = 3.0 # seconds +RETRY_MAX_DELAY = 30.0 # seconds + +_RETRYABLE_PATTERNS = ( + "failed to get response", + "operation was aborted", + "timed out", + "timeout", + "502", + "503", + "504", + "service unavailable", + "overloaded", +) + +# Rate-limit errors: the SDK already retries 5 times internally, so our +# outer retry loop should NOT retry these (it would just multiply the wait). +_RATE_LIMIT_PATTERNS = ("429", "rate limit", "rate_limit", "too many requests") + + +def _is_rate_limited(error_msg: str) -> bool: + """Check if an error is a rate-limit (429) from the provider.""" + lower = error_msg.lower() + return any(p in lower for p in _RATE_LIMIT_PATTERNS) + + +def _is_retryable(error_msg: str) -> bool: + """Check if an error message indicates a transient failure worth retrying. + + Rate-limit errors are excluded — the SDK already burned through 5 retries + internally, so adding more at our level just wastes time. 
+ """ + if _is_rate_limited(error_msg): + return False + lower = error_msg.lower() + return any(p in lower for p in _RETRYABLE_PATTERNS) + + +def _backoff_delay(attempt: int) -> float: + """Exponential backoff with jitter.""" + delay = min(RETRY_BASE_DELAY * (2**attempt), RETRY_MAX_DELAY) + return delay * (0.5 + random.random() * 0.5) + + +TERMINAL_EVENT_TYPES = frozenset( + { + SessionEventType.SESSION_IDLE, + SessionEventType.SESSION_ERROR, + SessionEventType.SESSION_SHUTDOWN, + } +) + + +# Use PermissionHandler.approve_all from the SDK (single-user trusted environment) + + +def format_prompt_with_history( + history: list[dict[str, Any]], + new_message: str, +) -> str: + """Format conversation history + new message into a single prompt string. + + The history list typically includes the latest user message as the last item + (just added to the store). We split context (all but last) from the actual prompt (last). + """ + if not history: + return new_message + + # All but last message as context; last is the new user message + context_messages = history[:-1] + if not context_messages: + return new_message + + lines: list[str] = [] + for msg in context_messages: + role = msg.get("role", "assistant") + content = msg.get("content", "") + if isinstance(content, list): + text_parts = [p.get("text", "") for p in content if isinstance(p, dict) and p.get("type") == "input_text"] + image_count = sum(1 for p in content if isinstance(p, dict) and p.get("type") == "input_image") + content = " ".join(t for t in text_parts if t) + if image_count: + content += f" [+{image_count} image(s)]" if content else f"[{image_count} image(s)]" + if not content: + continue + lines.append(f"[{role}]: {content}") + + if not lines: + return new_message + + return "\n" + "\n".join(lines) + "\n\n\n" + new_message + + +class CopilotRuntime: + """Manages a long-lived CopilotClient and creates per-request sessions.""" + + def __init__(self) -> None: + self._client: CopilotClient | None = None + + 
async def start(self) -> None: + github_token = settings.GITHUB_TOKEN or "not-needed" + self._client = CopilotClient( + SubprocessConfig( + cwd=settings.REPOS_DIR, + github_token=github_token, + use_logged_in_user=False, + ) + ) + await self._client.start() + logger.info( + "Copilot SDK client started (cwd=%s, copilot_auth=%s)", + settings.REPOS_DIR, + "yes" if settings.GITHUB_TOKEN else "no", + ) + + async def stop(self) -> None: + if self._client: + try: + await self._client.stop() + except BaseException: + logger.warning("Copilot SDK client stop errors", exc_info=True) + self._client = None + logger.info("Copilot SDK client stopped") + + @property + def client(self) -> CopilotClient: + if self._client is None: + raise RuntimeError("CopilotRuntime not started — call start() first") + return self._client + + async def create_session( + self, + *, + model: str, + provider_config: dict[str, Any] | None, + system_message: str, + tools: list[Tool] | None = None, + streaming: bool = True, + ) -> CopilotSession: + kwargs: dict[str, Any] = { + "on_permission_request": PermissionHandler.approve_all, + "model": model, + "streaming": streaming, + "working_directory": settings.REPOS_DIR, + "system_message": {"mode": "replace", "content": system_message}, + "tools": tools or None, + "excluded_tools": ["task"], + "skill_directories": _default_skill_directories(), + } + if provider_config is not None: + kwargs["provider"] = provider_config + return await self.client.create_session(**kwargs) + + +async def stream_session( + rt: CopilotRuntime, + *, + model: str, + provider_config: dict[str, Any] | None, + system_message: str, + prompt: str, + tools: list[Tool] | None = None, + attachments: list[dict[str, Any]] | None = None, + thread_id: str | None = None, +) -> AsyncIterator[SessionEvent]: + """Create a session, send a prompt, yield events until idle, then destroy. 
+ +    Retries transparently on transient errors (model timeouts, 5xx; not 429s — +    the SDK already retries rate limits internally) with backoff + jitter. +    """ +    # Reset advisor counter at the start of each run +    if thread_id is not None: +        try: +            from tools.advisor import _reset_advisor_state + +            _reset_advisor_state(thread_id) +        except ImportError: +            pass + +    last_error_msg = "" +    for attempt in range(MAX_SESSION_RETRIES): +        session = await rt.create_session( +            model=model, +            provider_config=provider_config, +            system_message=system_message, +            tools=tools, +        ) +        queue: asyncio.Queue[SessionEvent | None] = asyncio.Queue() + +        def on_event(event: SessionEvent) -> None: +            queue.put_nowait(event) +            if event.type in TERMINAL_EVENT_TYPES: +                queue.put_nowait(None)  # sentinel + +        unsub = session.on(on_event) +        hit_retryable_error = False +        try: +            await session.send(prompt, attachments=attachments) + +            while True: +                item = await queue.get() +                if item is None: +                    break + +                # Intercept SESSION_ERROR for retry +                if item.type == SessionEventType.SESSION_ERROR: +                    error_msg = (item.data and item.data.message) or "Unknown session error" +                    last_error_msg = error_msg +                    retries_left = MAX_SESSION_RETRIES - attempt - 1 +                    if _is_retryable(error_msg) and retries_left > 0: +                        delay = _backoff_delay(attempt) +                        logger.warning( +                            "Retryable session error (attempt %d/%d, next in %.1fs): %s", +                            attempt + 1, +                            MAX_SESSION_RETRIES, +                            delay, +                            error_msg, +                        ) +                        hit_retryable_error = True +                        break +                    # Not retryable or out of retries — yield the error to caller +                    if retries_left == 0 and _is_retryable(error_msg): +                        logger.error( +                            "All %d session retries exhausted: %s", +                            MAX_SESSION_RETRIES, +                            error_msg, +                        ) +                    yield item +                    return + +                yield item +        finally: +            unsub() +            await session.destroy() + +        if hit_retryable_error: +            await asyncio.sleep(_backoff_delay(attempt)) +            continue +        # Completed normally +        return + +    # Should not reach here, but safety net +    logger.error("stream_session fell through retry loop; last error: %s",
last_error_msg) + + +async def run_session( + rt: CopilotRuntime, + *, + model: str, + provider_config: dict[str, Any] | None, + system_message: str, + prompt: str, + tools: list[Tool] | None = None, + attachments: list[dict[str, Any]] | None = None, + thread_id: str | None = None, +) -> Any: + """Create a session, send a prompt, wait for completion, return raw result. + + Retries transparently on transient errors with exponential backoff + jitter. + """ + # Reset advisor counter at the start of each run + if thread_id is not None: + try: + from tools.advisor import _reset_advisor_state + + _reset_advisor_state(thread_id) + except ImportError: + pass + + last_exc: Exception | None = None + for attempt in range(MAX_SESSION_RETRIES): + session = await rt.create_session( + model=model, + provider_config=provider_config, + system_message=system_message, + tools=tools, + ) + try: + result = await session.send_and_wait(prompt, attachments=attachments, timeout=300) + if result and result.data and result.data.content: + return result + + # Fallback: check message history + messages = await session.get_messages() + for msg in reversed(messages): + if msg.type == SessionEventType.ASSISTANT_MESSAGE and msg.data and msg.data.content: + return msg + return result + except Exception as exc: + last_exc = exc + retries_left = MAX_SESSION_RETRIES - attempt - 1 + if _is_retryable(str(exc)) and retries_left > 0: + delay = _backoff_delay(attempt) + logger.warning( + "Retryable run_session error (attempt %d/%d, next in %.1fs): %s", + attempt + 1, + MAX_SESSION_RETRIES, + delay, + exc, + ) + await asyncio.sleep(delay) + continue + raise + finally: + await session.destroy() + + # All retries exhausted — raise the last exception + if last_exc is not None: + raise last_exc + + +# Module-level singleton +copilot = CopilotRuntime() diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100644 index 0000000..b3d3b1c --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,14 @@ 
+#!/usr/bin/env sh +set -eu + +if [ -f /host-git/.gitconfig ]; then + cp /host-git/.gitconfig /root/.gitconfig + chmod 644 /root/.gitconfig +fi + +if [ -f /host-git/.git-credentials ]; then + cp /host-git/.git-credentials /root/.git-credentials + chmod 600 /root/.git-credentials +fi + +exec "$@" \ No newline at end of file diff --git a/frontend/.gitignore b/frontend/.gitignore new file mode 100644 index 0000000..b947077 --- /dev/null +++ b/frontend/.gitignore @@ -0,0 +1,2 @@ +node_modules/ +dist/ diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000..7d8b821 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,15 @@ + + + + + + + CodeAnywhere + + + +
+ + + + \ No newline at end of file diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 0000000..057e561 --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,3449 @@ +{ + "name": "code-anywhere-frontend", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "code-anywhere-frontend", + "dependencies": { + "@ai-sdk/react": "latest", + "ai": "latest", + "react": "^19.1.0", + "react-dom": "^19.1.0", + "react-markdown": "^10.1.0", + "remark-gfm": "^4.0.0" + }, + "devDependencies": { + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0", + "@vitejs/plugin-react": "^4.5.0", + "typescript": "^5.8.0", + "vite": "^6.3.0" + } + }, + "node_modules/@ai-sdk/gateway": { + "version": "3.0.88", + "resolved": "https://registry.npmjs.org/@ai-sdk/gateway/-/gateway-3.0.88.tgz", + "integrity": "sha512-AFoj7xdWAtCQcy0jJ235ENSakYM8D28qBX+rB+/rX4r8qe/LXgl0e5UivOqxAlIM5E9jnQdYxIPuj3XFtGk/yg==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "3.0.8", + "@ai-sdk/provider-utils": "4.0.22", + "@vercel/oidc": "3.1.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.25.76 || ^4.1.8" + } + }, + "node_modules/@ai-sdk/provider": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-3.0.8.tgz", + "integrity": "sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==", + "license": "Apache-2.0", + "dependencies": { + "json-schema": "^0.4.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@ai-sdk/provider-utils": { + "version": "4.0.22", + "resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-4.0.22.tgz", + "integrity": "sha512-B2OTFcRw/Pdka9ZTjpXv6T6qZ6RruRuLokyb8HwW+aoW9ndJ3YasA3/mVswyJw7VMBF8ofXgqvcrCt9KYvFifg==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "3.0.8", + "@standard-schema/spec": "^1.1.0", + "eventsource-parser": 
"^3.0.6" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.25.76 || ^4.1.8" + } + }, + "node_modules/@ai-sdk/react": { + "version": "3.0.148", + "resolved": "https://registry.npmjs.org/@ai-sdk/react/-/react-3.0.148.tgz", + "integrity": "sha512-bxKtS3KINzjtEf9xrAhORRN0HIMgqlI1Nwhd0eaAXL3Eljf3XVl9Bw+HXiCLVNzyOcyOwwBLlvq8SZ0amys7eA==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider-utils": "4.0.22", + "ai": "6.0.146", + "swr": "^2.2.5", + "throttleit": "2.1.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "react": "^18 || ~19.0.1 || ~19.1.2 || ^19.2.1" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": 
"^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": 
"https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": 
"sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz", + "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz", + "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + 
"version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": 
"0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + 
"engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], 
+ "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": 
"sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": 
{ + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "license": "Apache-2.0", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz", + "integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.60.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz", + "integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz", + "integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz", + "integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz", + "integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz", + "integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.60.1", + 
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz", + "integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz", + "integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz", + "integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz", + "integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz", + "integrity": "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, 
+ "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz", + "integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz", + "integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz", + "integrity": "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz", + "integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz", + "integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==", + "cpu": [ + "riscv64" + ], + 
"dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz", + "integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz", + "integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz", + "integrity": "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz", + "integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz", + "integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==", + 
"cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz", + "integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz", + "integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz", + "integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz", + "integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", 
+ "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/debug": { + "version": "4.1.13", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.13.tgz", + "integrity": "sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + 
"version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", + "license": "MIT", + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": 
"sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@vercel/oidc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@vercel/oidc/-/oidc-3.1.0.tgz", + "integrity": "sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==", + "license": "Apache-2.0", + "engines": { + "node": ">= 20" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/ai": { + "version": "6.0.146", + "resolved": "https://registry.npmjs.org/ai/-/ai-6.0.146.tgz", + "integrity": 
"sha512-70DE8k1rR0N3mXxyyfjYAx/FxRln/kQ5ym18lt1ys1eUklcPuoIXGbUBwdfCbmkt6YF3jCDZ5+OgkWieP/NGDw==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/gateway": "3.0.88", + "@ai-sdk/provider": "3.0.8", + "@ai-sdk/provider-utils": "4.0.22", + "@opentelemetry/api": "1.9.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.25.76 || ^4.1.8" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.14", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.14.tgz", + "integrity": "sha512-fOVLPAsFTsQfuCkvahZkzq6nf8KvGWanlYoTh0SVA0A/PIUxQGU2AOZAoD95n2gFLVDW/jP6sbGLny95nmEuHA==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/browserslist": { + "version": "4.28.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.2.tgz", + "integrity": "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.10.12", + "caniuse-lite": "^1.0.30001782", + "electron-to-chromium": "^1.5.328", + "node-releases": "^2.0.36", + "update-browserslist-db": "^1.2.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 
|| ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001785", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001785.tgz", + "integrity": "sha512-blhOL/WNR+Km1RI/LCVAvA73xplXA7ZbjzI4YkMK9pa6T/P3F2GxjNpEkyw5repTw9IvkyrjyHpwjnhZ5FOvYQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": 
"sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + 
"node_modules/decode-named-character-reference": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", + "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.331", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.331.tgz", + "integrity": "sha512-IbxXrsTlD3hRodkLnbxAPP4OuJYdWCeM3IOdT+CpcMoIwIoDfCmRpEtSPfwBXxVkg9xmBeY7Lz2Eo2TDn/HC3Q==", + "dev": true, + "license": "ISC" + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", 
+ "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/eventsource-parser": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.6.tgz", + "integrity": "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==", + "license": "MIT", + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + 
"integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/html-url-attributes": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", + "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + "license": "MIT" + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + 
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", + "license": "(AFL-2.1 OR BSD-3-Clause)" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": 
"sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.3.tgz", + "integrity": "sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + 
"mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": 
"^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": 
"https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": 
"sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + 
"micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + 
"micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + 
"micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, 
+ "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + 
], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.37", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.37.tgz", + "integrity": "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==", + "dev": true, + "license": "MIT" + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + 
"type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": 
"sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-markdown": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz", + "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + 
} + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": 
"^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rollup": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz", + "integrity": "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.60.1", + "@rollup/rollup-android-arm64": "4.60.1", + "@rollup/rollup-darwin-arm64": "4.60.1", + "@rollup/rollup-darwin-x64": "4.60.1", + "@rollup/rollup-freebsd-arm64": "4.60.1", + "@rollup/rollup-freebsd-x64": "4.60.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.1", + "@rollup/rollup-linux-arm-musleabihf": "4.60.1", + "@rollup/rollup-linux-arm64-gnu": "4.60.1", + "@rollup/rollup-linux-arm64-musl": "4.60.1", + "@rollup/rollup-linux-loong64-gnu": "4.60.1", + "@rollup/rollup-linux-loong64-musl": "4.60.1", + "@rollup/rollup-linux-ppc64-gnu": "4.60.1", + "@rollup/rollup-linux-ppc64-musl": "4.60.1", + "@rollup/rollup-linux-riscv64-gnu": "4.60.1", + "@rollup/rollup-linux-riscv64-musl": "4.60.1", + "@rollup/rollup-linux-s390x-gnu": "4.60.1", + "@rollup/rollup-linux-x64-gnu": "4.60.1", + "@rollup/rollup-linux-x64-musl": "4.60.1", + "@rollup/rollup-openbsd-x64": "4.60.1", + "@rollup/rollup-openharmony-arm64": "4.60.1", + "@rollup/rollup-win32-arm64-msvc": "4.60.1", + "@rollup/rollup-win32-ia32-msvc": "4.60.1", + "@rollup/rollup-win32-x64-gnu": "4.60.1", + "@rollup/rollup-win32-x64-msvc": "4.60.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": 
"sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + 
"node_modules/style-to-object": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, + "node_modules/swr": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/swr/-/swr-2.4.1.tgz", + "integrity": "sha512-2CC6CiKQtEwaEeNiqWTAw9PGykW8SR5zZX8MZk6TeAvEAnVS7Visz8WzphqgtQ8v2xz/4Q5K+j+SeMaKXeeQIA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.3", + "use-sync-external-store": "^1.6.0" + }, + "peerDependencies": { + "react": "^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/throttleit": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-2.1.0.tgz", + "integrity": "sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + 
"version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + 
"@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz", + "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + 
"type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vite": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz", + "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + 
"picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..20b21bb --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,25 @@ +{ + "name": "code-anywhere-frontend", + "private": true, + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "react": "^19.1.0", + "react-dom": "^19.1.0", + "@ai-sdk/react": "latest", + "ai": "latest", + "react-markdown": "^10.1.0", + "remark-gfm": "^4.0.0" + }, + "devDependencies": { + "vite": "^6.3.0", + "@vitejs/plugin-react": "^4.5.0", + "typescript": "^5.8.0", + "@types/react": "^19.1.0", + "@types/react-dom": "^19.1.0" + } +} \ No newline at end of file diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx new file mode 100644 index 0000000..5fee1e1 --- /dev/null +++ b/frontend/src/App.tsx @@ -0,0 +1,344 @@ +import { useChat } from '@ai-sdk/react'; +import type { UIMessage } from 'ai'; +import { DefaultChatTransport } from 'ai'; +import { useCallback, useEffect, useMemo, useRef, useState, type CSSProperties } from 'react'; +import { Chat } from './Chat'; +import { Sidebar } from './Sidebar'; +import * as api from './api'; +import type { ServerThread, UIConfig } from './types'; + +export function App() { + const [config, setConfig] = useState(null); + const [threads, setThreads] = useState([]); + const [activeThreadId, setActiveThreadId] = useState(null); + const [activeThread, setActiveThread] = useState(null); + const [toast, setToast] = useState<{ text: string; type?: 'error' } | null>(null); + const [mobileSidebar, setMobileSidebar] = useState(false); + const [searchQuery, setSearchQuery] = useState(''); + const [sidebarWidth, setSidebarWidth] = useState(() => { + if (typeof window === 'undefined') return 260; + const raw = window.localStorage.getItem('codeanywhere.sidebarWidth'); + const parsed = Number(raw || 260); + return Number.isFinite(parsed) ? 
Math.min(460, Math.max(220, parsed)) : 260; + }); + + const threadIdRef = useRef(null); + const sidebarWidthRef = useRef(sidebarWidth); + threadIdRef.current = activeThreadId; + sidebarWidthRef.current = sidebarWidth; + + const transport = useMemo( + () => + new DefaultChatTransport({ + api: '/api/chat', + credentials: 'same-origin', + prepareSendMessagesRequest: ({ messages }) => { + const last = messages[messages.length - 1]; + const textContent = last?.parts + ?.filter((p): p is { type: 'text'; text: string } => p.type === 'text') + .map((p) => p.text) + .join('\n') || ''; + return { + body: { + threadId: threadIdRef.current, + content: textContent, + }, + }; + }, + }), + [], + ); + + const { + messages, + sendMessage, + setMessages, + status, + error: chatError, + stop, + } = useChat({ transport }); + + const flash = useCallback((text: string, type?: 'error') => { + setToast({ text, type }); + setTimeout(() => setToast(null), 4000); + }, []); + + useEffect(() => { + (async () => { + try { + const [cfg, threadList] = await Promise.all([api.loadConfig(), api.listThreads()]); + setConfig(cfg); + setThreads(threadList); + if (threadList.length > 0) { + const first = threadList[0]; + setActiveThreadId(first.id); + const full = await api.getThread(first.id); + setActiveThread(full); + setMessages(toUIMessages(full.messages || [])); + } + } catch { + // 401 handled by api.ts redirect + } + })(); + }, [setMessages]); + + const refreshThreads = useCallback( + async (selectId?: string) => { + const list = await api.listThreads(searchQuery || undefined); + setThreads(list); + if (selectId && list.some((thread) => thread.id === selectId)) { + setActiveThreadId(selectId); + } + }, + [searchQuery], + ); + + useEffect(() => { + const timer = setTimeout(() => { + refreshThreads(activeThreadId || undefined).catch(() => { }); + }, 250); + return () => clearTimeout(timer); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [searchQuery]); + + const switchThread = 
useCallback( + async (id: string) => { + setActiveThreadId(id); + setMobileSidebar(false); + try { + const full = await api.getThread(id); + setActiveThread(full); + setMessages(toUIMessages(full.messages || [])); + } catch (e: unknown) { + flash(e instanceof Error ? e.message : 'Failed to load thread', 'error'); + } + }, + [flash, setMessages], + ); + + const createNewThread = useCallback( + async (selection?: { model?: string; provider?: string }) => { + try { + const thread = await api.createThread({ + model: selection?.model || config?.defaultModel, + provider: selection?.provider || config?.defaultProvider, + }); + setActiveThreadId(thread.id); + setActiveThread(thread); + setMessages([]); + await refreshThreads(thread.id); + setMobileSidebar(false); + } catch (e: unknown) { + flash(e instanceof Error ? e.message : 'Failed to create thread', 'error'); + } + }, + [config, flash, refreshThreads, setMessages], + ); + + const handleSend = useCallback( + async (text: string) => { + let threadId = activeThreadId; + if (!threadId) { + const firstLine = text.split('\n', 1)[0].slice(0, 60).trim() || 'New chat'; + const thread = await api.createThread({ + title: firstLine, + model: config?.defaultModel, + provider: config?.defaultProvider, + }); + threadId = thread.id; + setActiveThreadId(threadId); + setActiveThread(thread); + threadIdRef.current = threadId; + await refreshThreads(threadId); + } + sendMessage({ text }); + }, + [activeThreadId, config, refreshThreads, sendMessage], + ); + + useEffect(() => { + if (status === 'ready' && activeThreadId && messages.length > 0) { + refreshThreads(activeThreadId).catch(() => { }); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [status]); + + const handleDeleteThread = useCallback( + async (id: string) => { + if (!confirm('Delete this thread?')) return; + try { + await api.deleteThread(id); + if (activeThreadId === id) { + setActiveThreadId(null); + setActiveThread(null); + setMessages([]); + } + await 
refreshThreads(); + } catch (e: unknown) { + flash(e instanceof Error ? e.message : 'Delete failed', 'error'); + } + }, + [activeThreadId, flash, refreshThreads, setMessages], + ); + + const handleRename = useCallback( + async (id: string) => { + const name = prompt('New name:'); + if (!name) return; + try { + const updated = await api.updateThread(id, { title: name }); + if (activeThreadId === id) setActiveThread(updated); + await refreshThreads(id); + } catch (e: unknown) { + flash(e instanceof Error ? e.message : 'Rename failed', 'error'); + } + }, + [activeThreadId, flash, refreshThreads], + ); + + const handleMagicRename = useCallback( + async (id: string) => { + try { + flash('Summarizing with AI...'); + const updated = await api.magicRename(id); + if (activeThreadId === id) setActiveThread(updated); + await refreshThreads(id); + flash('Thread renamed'); + } catch (e: unknown) { + flash(e instanceof Error ? e.message : 'Magic rename failed', 'error'); + } + }, + [activeThreadId, flash, refreshThreads], + ); + + const handleMagicRenameAll = useCallback(async () => { + try { + flash('Summarizing eligible threads with gpt-5.4-nano...'); + const result = await api.magicRenameAll(); + await refreshThreads(activeThreadId || undefined); + if (activeThreadId) { + const full = await api.getThread(activeThreadId); + setActiveThread(full); + } + if (result.renamedCount > 0) { + flash( + result.skippedCount > 0 + ? `Renamed ${result.renamedCount} threads. Skipped ${result.skippedCount}.` + : `Renamed ${result.renamedCount} threads.`, + ); + } else { + flash('No eligible threads were renamed.'); + } + } catch (e: unknown) { + flash(e instanceof Error ? 
e.message : 'Magic rename all failed', 'error'); + } + }, [activeThreadId, flash, refreshThreads]); + + const handleSaveModel = useCallback( + async (provider: string, model: string) => { + if (!activeThreadId) return; + try { + const updated = await api.updateThread(activeThreadId, { provider, model }); + setActiveThread(updated); + await refreshThreads(activeThreadId); + flash(`Routing set to ${updated.provider}:${updated.model}`); + } catch (e: unknown) { + flash(e instanceof Error ? e.message : 'Routing save failed', 'error'); + } + }, + [activeThreadId, flash, refreshThreads], + ); + + const handleSidebarResizeStart = useCallback((event: React.MouseEvent) => { + if (window.matchMedia('(max-width: 768px)').matches) return; + event.preventDefault(); + const startX = event.clientX; + const startWidth = sidebarWidthRef.current; + let nextWidth = startWidth; + document.body.classList.add('sidebar-resizing'); + + const onMove = (moveEvent: MouseEvent) => { + nextWidth = Math.min(460, Math.max(220, startWidth + moveEvent.clientX - startX)); + setSidebarWidth(nextWidth); + }; + + const onUp = () => { + document.body.classList.remove('sidebar-resizing'); + window.removeEventListener('mousemove', onMove); + window.removeEventListener('mouseup', onUp); + window.localStorage.setItem('codeanywhere.sidebarWidth', String(nextWidth)); + }; + + window.addEventListener('mousemove', onMove); + window.addEventListener('mouseup', onUp); + }, []); + + const shellStyle = useMemo( + () => ({ '--sidebar-width': `${sidebarWidth}px` }) as CSSProperties, + [sidebarWidth], + ); + + if (!config) { + return ( +
+ Loading CodeAnywhere... +
+ ); + } + + return ( +
+ {mobileSidebar &&
setMobileSidebar(false)} />} + + + +
+ + handleDeleteThread(activeThreadId) : undefined} + onRename={activeThreadId ? () => handleRename(activeThreadId) : undefined} + onMagicRename={activeThreadId ? () => handleMagicRename(activeThreadId) : undefined} + onToggleSidebar={() => setMobileSidebar((value) => !value)} + /> + + {toast &&
{toast.text}
} +
+ ); +} + +function toUIMessages(serverMessages: { id: string; role: string; content: string; created_at: string; parts?: { type: string;[k: string]: unknown }[] }[]): UIMessage[] { + return serverMessages.map((m) => ({ + id: m.id, + role: m.role as 'user' | 'assistant', + parts: [{ type: 'text' as const, text: m.content || '' }], + createdAt: new Date(m.created_at), + })); +} diff --git a/frontend/src/Chat.tsx b/frontend/src/Chat.tsx new file mode 100644 index 0000000..386324a --- /dev/null +++ b/frontend/src/Chat.tsx @@ -0,0 +1,286 @@ +import type { UIMessage } from 'ai'; +import { useCallback, useEffect, useRef, useState } from 'react'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; +import type { ServerThread } from './types'; + +interface ChatProps { + thread: ServerThread | null; + messages: UIMessage[]; + status: string; + error: Error | undefined; + onSend: (text: string) => void; + onStop: () => void; + onDeleteThread?: () => void; + onRename?: () => void; + onMagicRename?: () => void; + onToggleSidebar: () => void; +} + +export function Chat({ + thread, + messages, + status, + error, + onSend, + onStop, + onDeleteThread, + onRename, + onMagicRename, + onToggleSidebar, +}: ChatProps) { + const [input, setInput] = useState(''); + const [busyElapsed, setBusyElapsed] = useState('0s'); + const scrollRef = useRef(null); + const textareaRef = useRef(null); + + // Auto-scroll on new content + useEffect(() => { + const el = scrollRef.current; + if (el) el.scrollTop = el.scrollHeight; + }, [messages, status]); + + const handleSubmit = useCallback( + (e?: React.FormEvent) => { + e?.preventDefault(); + const text = input.trim(); + if (!text || status === 'submitted' || status === 'streaming') return; + onSend(text); + setInput(''); + }, + [input, status, onSend], + ); + + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault(); + handleSubmit(); + } + }, + 
[handleSubmit], + ); + + const busy = status === 'submitted' || status === 'streaming'; + + useEffect(() => { + if (!busy) { + setBusyElapsed('0s'); + return; + } + + const startedAt = Date.now(); + const tick = () => { + setBusyElapsed(formatElapsedDuration((Date.now() - startedAt) / 1000)); + }; + + tick(); + const timer = window.setInterval(tick, 1000); + return () => window.clearInterval(timer); + }, [busy]); + + return ( +
+
+
+ +
Workspace chat
+

{thread?.title || 'Start a chat'}

+ {thread?.usage && ( +
+ {thread.usage.total_tokens.toLocaleString()} tokens · ${Number(thread.usage.cost_usd || '0').toFixed(6)} +
+ )} +
+ {thread && ( +
+ {onMagicRename && ( + + )} + {onRename && ( + + )} + {onDeleteThread && ( + + )} +
+ )} +
+ +
+ {messages.length === 0 && !busy && ( +
+ {thread + ? 'This thread is empty. Type your first prompt below.' + : 'Select a thread or create a new chat to get started.'} +
+ )} + + {messages.map((msg) => ( + + ))} + + {busy && ( +
+ {status === 'submitted' ? 'Sending...' : 'Streaming...'} · {busyElapsed} + +
+ )} + + {error && ( +
+
Error
+
+ {error.message || 'An error occurred.'} +
+
+ )} +
+ +
+
+ + + + +
+
+ + Shift+Enter for a newline. Press Enter to send. +
+ +
+ +
+
+ + + +
+ Debug +

+        
+
+ + + + + \ No newline at end of file diff --git a/telegram_bot.py b/telegram_bot.py new file mode 100644 index 0000000..2daf09b --- /dev/null +++ b/telegram_bot.py @@ -0,0 +1,2012 @@ +"""Telegram bot — routes messages to Copilot SDK with persistent shared sessions.""" + +import asyncio +import hashlib +import json +import logging +import re +from decimal import Decimal +from io import BytesIO +from pathlib import Path +from typing import Any + +from copilot import define_tool +from copilot.generated.session_events import SessionEventType +from copilot.tools import ToolInvocation +from pydantic import BaseModel, Field +from telegram import ( + BotCommand, + BotCommandScopeAllChatAdministrators, + BotCommandScopeAllGroupChats, + BotCommandScopeDefault, + InlineKeyboardButton, + InlineKeyboardMarkup, + InputFile, + Update, + User, + helpers, +) +from telegram.error import RetryAfter, TelegramError +from telegram.ext import ( + Application, + ApplicationHandlerStop, + CallbackQueryHandler, + CommandHandler, + ContextTypes, + MessageHandler, + PicklePersistence, + TypeHandler, + filters, +) + +import tools as _tools_init # noqa: F401 — registers tool sets +from background_tasks import BackgroundTaskManager +from config import settings +from copilot_runtime import copilot, format_prompt_with_history, stream_session +from instance import BASE_CONTEXT, SKILLS_CONTEXT, TELEGRAM_START_MESSAGE +from learning import extract_learnings_from_turn, format_learnings_for_prompt +from llm_costs import extract_usage_and_cost, format_cost_value, summarize_usage + + +def _get_advisor_usage(thread_id: str | None) -> dict | None: + """Fetch advisor sidecar usage for *thread_id*, if the module is loaded.""" + if not thread_id: + return None + try: + from tools.advisor import get_advisor_usage + return get_advisor_usage(thread_id) + except ImportError: + return None +from model_selection import ( + ModelSelection, + ModelSelectionError, + build_provider_config, + default_selection, + 
format_known_models, + format_selection, + resolve_selection, +) +from prompt_utils import generate_diff +from tool_pipeline import ( + active_toolset_names, + build_capability_fragment, + build_integration_tools, + build_onboarding_fragment, +) +from tool_registry import registry as tool_registry +from ux import ( + append_elapsed_status, + busy_message, + extract_final_text, + format_session_error, + format_tool_counts, + markdown_to_telegram_html, + stream_status_updates, + stream_trace_event, + working_message, +) +from web_fallback_store import WebFallbackStore + +logger = logging.getLogger(__name__) + +# ── Telegram access control (dynamic owner-approval) ──────────── +_OWNER_TG_CHAT_ID: int | None = None +if settings.OWNER_TELEGRAM_CHAT_ID.strip(): + try: + _OWNER_TG_CHAT_ID = int(settings.OWNER_TELEGRAM_CHAT_ID.strip()) + logger.info("Owner Telegram chat ID configured: %d", _OWNER_TG_CHAT_ID) + except ValueError: + logger.warning("OWNER_TELEGRAM_CHAT_ID is not a valid integer, approval gate disabled") + +# Track pending approval requests so we don't spam the owner +_pending_approval_notified: set[int] = set() + + +async def _gate_telegram_access(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Gate Telegram access via owner-approval flow. + + - Owner (matching OWNER_TELEGRAM_CHAT_ID) → always allowed. + - telegram_approved=True → allowed. + - telegram_approved=False → silently blocked (denied). + - telegram_approved=None (new/pending) → notify owner with approve/deny, + tell user "waiting for approval", then block. + - Callback queries for approval buttons are always let through. 
+ """ + if not _OWNER_TG_CHAT_ID: + return # no owner configured, open access + + # Always let approval callback queries through + if update.callback_query and update.callback_query.data and update.callback_query.data.startswith("tg_approve:"): + return + + tg_user = update.effective_user + if not tg_user: + return # system update, let it through + + # Owner is always allowed + if tg_user.id == _OWNER_TG_CHAT_ID: + return + + # Look up approval status + from user_store import get_store as _get_user_store + + store = _get_user_store() + db_user = store.resolve_or_create_user( + provider="telegram", + external_id=str(tg_user.id), + display_name=tg_user.full_name or str(tg_user.id), + ) + + if db_user.telegram_approved is True: + return # approved + + if db_user.telegram_approved is False: + raise ApplicationHandlerStop() # denied — silent drop + + # telegram_approved is None → pending approval + if tg_user.id not in _pending_approval_notified: + _pending_approval_notified.add(tg_user.id) + # Send approval request to owner + keyboard = InlineKeyboardMarkup( + [ + [ + InlineKeyboardButton("✅ Approve", callback_data=f"tg_approve:yes:{db_user.id}"), + InlineKeyboardButton("❌ Deny", callback_data=f"tg_approve:no:{db_user.id}"), + ] + ] + ) + user_info = ( + f"New Telegram user requesting access:\n" + f"• Name: {helpers.escape_markdown(tg_user.full_name or 'unknown')}\n" + f"• Username: @{tg_user.username or 'none'}\n" + f"• Telegram ID: {tg_user.id}\n" + f"• Internal ID: {db_user.id}" + ) + try: + await context.bot.send_message( + chat_id=_OWNER_TG_CHAT_ID, + text=user_info, + parse_mode="HTML", + reply_markup=keyboard, + ) + except TelegramError: + logger.warning("Failed to send approval request to owner", exc_info=True) + + # Tell the user they're pending + msg = update.effective_message + if msg: + try: + await msg.reply_text("⏳ Your access request has been sent to the bot owner. 
Please wait for approval.") + except TelegramError: + pass + + raise ApplicationHandlerStop() + + +async def _handle_approval_callback(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Handle approve/deny inline keyboard callbacks from the owner.""" + query = update.callback_query + if not query or not query.data: + return + await query.answer() + + parts = query.data.split(":") + if len(parts) != 3: + return + _, decision, user_id = parts + + from user_store import get_store as _get_user_store + + store = _get_user_store() + db_user = store.get_user(user_id) + if not db_user: + await query.edit_message_text("⚠️ User not found in database.") + return + + approved = decision == "yes" + store.set_telegram_approval(user_id, approved) + + status = "✅ Approved" if approved else "❌ Denied" + await query.edit_message_text(f"{status}: {db_user.display_name} (Telegram)") + + # If approved, notify the user + if approved: + # Find their Telegram ID from external_identities + row = store.conn.execute( + "SELECT external_id FROM external_identities WHERE user_id = ? AND provider = 'telegram'", + (user_id,), + ).fetchone() + if row: + tg_id = int(row["external_id"]) + _pending_approval_notified.discard(tg_id) + try: + await context.bot.send_message( + chat_id=tg_id, + text="✅ Your access has been approved! 
You can now use the bot.", + ) + except TelegramError: + logger.warning("Failed to notify approved user %s", tg_id, exc_info=True) + + +MAX_HISTORY = 40 +TG_MESSAGE_LIMIT = 4000 +TG_TOPIC_NAME_LIMIT = 128 +TG_STATUS_EDIT_INTERVAL = 5.0 +TG_SYSTEM_PROMPT_LIMIT = 12000 +SKILL_MENU_CALLBACK_PREFIX = "skill_menu:" +SKILL_MENU_ITEMS = [ + ("create-a-skill", "Create or update skills"), +] + + +_ACTIVE_RUNS_KEY = "active_runs" +_USER_MODEL_ALIASES_KEY = "user_model_aliases" + +# ── Alias file persistence (belt-and-suspenders alongside PicklePersistence) ── + + +def _alias_file_path() -> Path: + return Path(settings.TG_PERSISTENCE_DIR or settings.DATA_DIR).expanduser() / "aliases.json" + + +def _load_alias_file() -> dict[str, dict[str, str]]: + """Load all aliases from the JSON file. Returns {owner_key: {alias: target}}.""" + p = _alias_file_path() + if not p.exists(): + return {} + try: + data = json.loads(p.read_text("utf-8")) + if isinstance(data, dict): + # Filter out any flat alias entries that leaked into the top level; + # only keep entries where the value is a nested dict (owner -> aliases). 
+ return {k: v for k, v in data.items() if isinstance(v, dict)} + except Exception: + logger.warning("Failed to read alias file %s, starting fresh", p, exc_info=True) + return {} + + +def _save_alias_file(all_aliases: dict[str, dict[str, str]]) -> None: + """Atomically write all aliases to the JSON file.""" + p = _alias_file_path() + p.parent.mkdir(parents=True, exist_ok=True) + tmp = p.with_suffix(".tmp") + try: + tmp.write_text(json.dumps(all_aliases, indent=2, sort_keys=True), "utf-8") + tmp.replace(p) + except Exception: + logger.warning("Failed to write alias file %s", p, exc_info=True) + + +def _normalize_alias_name(raw_name: str) -> str: + normalized = str(raw_name or "").strip().lower() + if not normalized: + raise ValueError("Alias name cannot be empty") + if ":" in normalized or any(ch.isspace() for ch in normalized): + raise ValueError("Alias names cannot contain spaces or `:`") + if not re.fullmatch(r"[a-z0-9][a-z0-9._-]*", normalized): + raise ValueError("Alias names may only use lowercase letters, numbers, `.`, `_`, and `-`") + return normalized + + +def _alias_owner_key(user: User | None) -> str | None: + if user is None: + return None + return str(user.id) + + +def _user_aliases(context: ContextTypes.DEFAULT_TYPE, user: User | None) -> dict[str, str]: + owner_key = _alias_owner_key(user) + if owner_key is None: + return {} + + aliases = context.application.bot_data.get(_USER_MODEL_ALIASES_KEY) + if not isinstance(aliases, dict): + aliases = {} + context.application.bot_data[_USER_MODEL_ALIASES_KEY] = aliases + + user_aliases = aliases.get(owner_key) + if not isinstance(user_aliases, dict): + user_aliases = {} + aliases[owner_key] = user_aliases + + # Merge aliases from the durable JSON file (file wins for missing keys) + file_aliases = _load_alias_file().get(owner_key, {}) + for k, v in file_aliases.items(): + if k not in user_aliases: + user_aliases[k] = v + + normalized: dict[str, str] = {} + changed = False + for raw_name, raw_target in 
list(user_aliases.items()): + try: + alias_name = _normalize_alias_name(str(raw_name)) + except ValueError: + changed = True + continue + target = str(raw_target or "").strip() + if not target: + changed = True + continue + normalized[alias_name] = target + if raw_name != alias_name or raw_target != target: + changed = True + + if changed or user_aliases is not normalized: + aliases[owner_key] = normalized + return aliases[owner_key] + + +def _resolve_user_model_alias( + context: ContextTypes.DEFAULT_TYPE, user: User | None, requested_model: str +) -> tuple[str, str | None]: + normalized_request = str(requested_model or "").strip() + if not normalized_request: + return normalized_request, None + + aliases = _user_aliases(context, user) + alias_key = normalized_request.lower() + target = aliases.get(alias_key) + if not target: + return normalized_request, None + return target, alias_key + + +def _set_user_model_alias( + context: ContextTypes.DEFAULT_TYPE, user: User | None, alias_name: str, target_model: str +) -> tuple[str, str]: + owner_key = _alias_owner_key(user) + if owner_key is None: + raise ValueError("User identity is unavailable for alias storage") + + normalized_alias = _normalize_alias_name(alias_name) + normalized_target = str(target_model or "").strip() + if not normalized_target: + raise ValueError("Model target cannot be empty") + + aliases = _user_aliases(context, user) + aliases[normalized_alias] = normalized_target + + # Immediately persist to JSON file (durable, survives crashes) + _persist_aliases_to_file(context) + + return normalized_alias, normalized_target + + +def _persist_aliases_to_file(context: ContextTypes.DEFAULT_TYPE) -> None: + """Write the full alias dict from bot_data to the JSON file.""" + all_aliases = context.application.bot_data.get(_USER_MODEL_ALIASES_KEY) + if isinstance(all_aliases, dict): + _save_alias_file(all_aliases) + + +def _format_user_aliases(context: ContextTypes.DEFAULT_TYPE, user: User | None) -> str: + aliases = 
_user_aliases(context, user) + if not aliases: + return "No aliases saved yet.\nUsage: /alias cheap vercel:google/gemma-4-31b-it" + + lines = ["Your model aliases"] + for name in sorted(aliases): + lines.append(f"- `{name}` → `{aliases[name]}`") + return "\n".join(lines) + + +def _active_runs(context: ContextTypes.DEFAULT_TYPE) -> dict[str, dict[str, Any]]: + runs = context.application.bot_data.get(_ACTIVE_RUNS_KEY) + if not isinstance(runs, dict): + runs = {} + context.application.bot_data[_ACTIVE_RUNS_KEY] = runs + return runs + + +def _register_active_run(context: ContextTypes.DEFAULT_TYPE, thread_id: str, run_state: dict[str, Any]) -> None: + _active_runs(context)[thread_id] = run_state + + +def _clear_active_run(context: ContextTypes.DEFAULT_TYPE, thread_id: str, run_state: dict[str, Any]) -> None: + runs = _active_runs(context) + if runs.get(thread_id) is run_state: + runs.pop(thread_id, None) + + +def _get_active_run( + update: Update, context: ContextTypes.DEFAULT_TYPE +) -> tuple[dict[str, Any] | None, dict[str, Any] | None]: + thread = _current_thread(update, context) + if thread is None: + return None, None + return thread, _active_runs(context).get(thread["id"]) + + +def _effective_topic_thread_id(update: Update) -> int: + message = update.effective_message + if message is None or not getattr(message, "is_topic_message", False): + return 0 + return int(getattr(message, "message_thread_id", 0) or 0) + + +def _is_general_topic(update: Update) -> bool: + chat = update.effective_chat + if chat is None or not getattr(chat, "is_forum", False): + return False + return _effective_topic_thread_id(update) == 1 + + +def _normalize_topic_name(raw_name: str) -> str: + normalized = " ".join(str(raw_name or "").split()) + if not normalized: + raise ValueError("Topic name cannot be empty") + if len(normalized) > TG_TOPIC_NAME_LIMIT: + raise ValueError(f"Topic names must be {TG_TOPIC_NAME_LIMIT} characters or fewer") + return normalized + + +def 
_normalize_system_prompt_value(raw_prompt: str) -> str: + normalized = str(raw_prompt or "").strip() + if not normalized: + raise ValueError("System instructions cannot be empty") + if len(normalized) > TG_SYSTEM_PROMPT_LIMIT: + raise ValueError(f"System instructions must be {TG_SYSTEM_PROMPT_LIMIT} characters or fewer.") + return normalized + + +def _format_prompt_diff_message(diff: str) -> str: + header = "Proposed System Prompt Change:\n\n" + if not diff: + return header.rstrip() + + suffix = "\n\n[diff truncated]" + max_diff_length = TG_MESSAGE_LIMIT - len(header) + if len(diff) <= max_diff_length: + return header + diff + + truncated_length = max_diff_length - len(suffix) + preview = diff[: max(truncated_length, 0)].rstrip("\n") + return header + preview + suffix + + +def _topic_session_key(chat_id: int, thread_id: int) -> str: + return f"topic:{chat_id}:{thread_id}" + + +def _telegram_agent_context(update: Update) -> str | None: + chat = update.effective_chat + message = update.effective_message + if chat is None or not getattr(chat, "is_forum", False): + return None + + if _is_general_topic(update): + current_location = "The current message is inside the General topic." + elif getattr(message, "is_topic_message", False): + current_location = "The current message is inside a named forum topic." + else: + current_location = "The current message is in a forum supergroup." + + return ( + "You are running inside a Telegram forum supergroup. " + f"{current_location} " + "Telegram topic-management tools are available in this run. " + "Use them when the user asks to create, rename, or organize forum topics. " + "If the user wants one topic per repo or project, inspect the directory first and then create the requested topics. " + "Your visible final reply still appears in the current chat or topic even if you create new topics elsewhere." 
+ ) + + +def _build_telegram_topic_tools(update: Update, context: ContextTypes.DEFAULT_TYPE) -> list[Any]: + chat = update.effective_chat + if chat is None or not getattr(chat, "is_forum", False): + return [] + + store = _store(context) + + class CreateTopicsParams(BaseModel): + topic_names: list[str] + initial_message: str | None = None + + async def _create_topics_handler(params: CreateTopicsParams, invocation: ToolInvocation) -> str: + unique_names: list[str] = [] + seen_names: set[str] = set() + invalid_names: list[str] = [] + + for raw_name in params.topic_names: + try: + normalized_name = _normalize_topic_name(raw_name) + except ValueError as error: + invalid_names.append(f"{raw_name!r}: {error}") + continue + + dedupe_key = normalized_name.casefold() + if dedupe_key in seen_names: + continue + seen_names.add(dedupe_key) + unique_names.append(normalized_name) + + if not unique_names: + if invalid_names: + return "No valid topic names were provided. " + "; ".join(invalid_names) + return "No topic names were provided." + + seeded_message = str(params.initial_message or "").strip() + selection = _get_selection(update, context) + created_topics: list[str] = [] + + for topic_name in unique_names: + forum_topic = await chat.create_forum_topic(name=topic_name) + thread_id = int(forum_topic.message_thread_id) + thread = store.get_or_create_session_thread( + session_key=_topic_session_key(chat.id, thread_id), + title=topic_name, + model=selection.model, + provider=selection.provider, + source="telegram", + session_label=f"Telegram topic {topic_name}", + ) + store.update_thread(thread["id"], title=topic_name, title_source="manual") + + if seeded_message: + await chat.send_message(seeded_message, message_thread_id=thread_id) + store.add_message(thread["id"], "assistant", seeded_message) + + created_topics.append(f"{topic_name} (thread {thread_id})") + + if invalid_names: + return "Created topics: " + ", ".join(created_topics) + ". 
Skipped: " + "; ".join(invalid_names) + return "Created topics: " + ", ".join(created_topics) + + class RenameTopicParams(BaseModel): + topic_name: str + + async def _rename_topic_handler(params: RenameTopicParams, invocation: ToolInvocation) -> str: + normalized_name = _normalize_topic_name(params.topic_name) + await _rename_forum_topic(update, normalized_name) + thread = _ensure_thread(update, context) + store.update_thread(thread["id"], title=normalized_name, title_source="manual") + return f"Renamed the current topic to {normalized_name}." + + create_tool = define_tool( + name="telegram_create_topics", + description="Create one or more Telegram forum topics in the current chat. Use this when the user asks to start new topics, open one topic per item, or organize work into forum topics.", + handler=_create_topics_handler, + params_type=CreateTopicsParams, + ) + rename_tool = define_tool( + name="telegram_rename_current_topic", + description="Rename the current Telegram forum topic. Use this when the user asks to rename the topic they are currently chatting in.", + handler=_rename_topic_handler, + params_type=RenameTopicParams, + ) + + class ProposePromptParams(BaseModel): + new_prompt: str + + async def _propose_system_prompt(params: ProposePromptParams, invocation: ToolInvocation) -> str: + try: + normalized_prompt = _normalize_system_prompt_value(params.new_prompt) + except ValueError as error: + return f"❌ {error}" + + session_key = _session_key(update) + current_record = store.get_topic_system_prompt(session_key) + old_prompt = current_record["prompt"] if current_record else "" + + diff = generate_diff(old_prompt, normalized_prompt) + if not diff: + return "The proposed prompt is identical to the current one." 
+ + pending = context.application.bot_data.setdefault("pending_prompts", {}) + pending[session_key] = normalized_prompt + + keyboard = InlineKeyboardMarkup( + [ + [ + InlineKeyboardButton("✅ Approve", callback_data=f"prompt_app:yes:{session_key}"), + InlineKeyboardButton("❌ Decline", callback_data=f"prompt_app:no:{session_key}"), + ] + ] + ) + + chat = update.effective_chat + if chat is None: + raise RuntimeError("Telegram update missing chat context") + + diff_text = _format_prompt_diff_message(diff) + + send_kwargs: dict[str, Any] = {"reply_markup": keyboard} + thread_id = _effective_topic_thread_id(update) + if thread_id and not _is_general_topic(update): + send_kwargs["message_thread_id"] = thread_id + + await chat.send_message( + diff_text, + **send_kwargs, + ) + + return "I have submitted the prompt change for approval." + + propose_prompt_tool = define_tool( + name="propose_system_prompt", + description="Propose a change to the current topic's system prompt. This requires human approval via a diff message.", + handler=_propose_system_prompt, + params_type=ProposePromptParams, + ) + + return [create_tool, rename_tool, propose_prompt_tool] + + +async def _rename_forum_topic(update: Update, topic_name: str) -> None: + chat = update.effective_chat + message = update.effective_message + if chat is None or message is None: + raise RuntimeError("Telegram update missing chat context") + if not getattr(chat, "is_forum", False) or not getattr(message, "is_topic_message", False): + raise RuntimeError("This command only works inside a forum topic") + + if _is_general_topic(update): + await chat.edit_general_forum_topic(name=topic_name) + return + + thread_id = _effective_topic_thread_id(update) + if not thread_id: + raise RuntimeError("Could not determine the current forum topic") + await chat.edit_forum_topic(message_thread_id=thread_id, name=topic_name) + + +def _format_system_prompt(prompt: str) -> str: + compact = str(prompt or "").strip() + if len(compact) <= 
TG_MESSAGE_LIMIT: + return compact + return compact[: TG_MESSAGE_LIMIT - 3].rstrip() + "..." + + +def _topic_system_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> str | None: + record = _store(context).get_topic_system_prompt(_session_key(update)) + if not record: + return None + prompt = str(record.get("prompt") or "").strip() + return prompt or None + + +def _format_tg_recent_events(user) -> str | None: + """Format recent events for injection into the Telegram system message (T028).""" + from datetime import datetime + from datetime import timezone as tz + + from user_store import get_store as _us + + store = _us() + events = store.get_recent_events(user.id, window_hours=24) + if not events: + return None + lines = [] + now = datetime.now(tz.utc) + for ev in events[:15]: + try: + created = datetime.fromisoformat(ev.created_at) + delta = now - created + hours = int(delta.total_seconds() // 3600) + if hours < 1: + ago = f"{int(delta.total_seconds() // 60)}m ago" + elif hours < 24: + ago = f"{hours}h ago" + else: + ago = "yesterday" + except ValueError: + ago = "recently" + lines.append(f"- {ago}: {ev.summary}") + store.mark_events_consumed([ev.id for ev in events[:15]]) + return "Recent activity for you:\n" + "\n".join(lines) + + +def _compose_system_message( + update: Update, + context: ContextTypes.DEFAULT_TYPE, + *, + background_summary: str | None = None, + user=None, +) -> str: + store = _store(context) + sections = [BASE_CONTEXT.rstrip(), SKILLS_CONTEXT.rstrip()] + + # Integration tool prompt fragments + active_toolsets = active_toolset_names(user) + fragments = tool_registry.get_system_prompt_fragments(active_toolsets) + if fragments: + sections.extend(fragments) + + topic_prompt = _topic_system_message(update, context) + if topic_prompt: + sections.append(f"Persistent topic instruction:\n{topic_prompt}") + + extra_context = _telegram_agent_context(update) + if extra_context: + sections.append(extra_context.strip()) + + if background_summary: 
+ sections.append(background_summary.strip()) + + # Inject learnings (global + project-scoped) + thread = _current_thread(update, context) + if thread: + project_learnings = store.get_project_learnings(thread["id"]) + global_learnings = store.get_global_learnings() + learnings_text = format_learnings_for_prompt(project_learnings, global_learnings) + if learnings_text: + sections.append(learnings_text) + + # Inject recent events (T028) + if user: + events_section = _format_tg_recent_events(user) + if events_section: + sections.append(events_section) + + # Inject onboarding / capability status (T037) + if user: + onboarding = build_onboarding_fragment(user) + if onboarding: + sections.append(onboarding) + capability = build_capability_fragment(user) + if capability: + sections.append(capability) + + return "\n\n".join(section for section in sections if section) + + +def _session_key(update: Update) -> str: + chat = update.effective_chat + if chat is None: + raise RuntimeError("Telegram update missing chat context") + thread_id = _effective_topic_thread_id(update) + if chat.type == "private": + return f"pm:{chat.id}" + if thread_id: + return f"topic:{chat.id}:{thread_id}" + return f"group:{chat.id}" + + +def _store(context: ContextTypes.DEFAULT_TYPE) -> WebFallbackStore: + store = getattr(context.application, "_runtime_store", None) + if not isinstance(store, WebFallbackStore): + raise RuntimeError("Telegram thread store is not configured") + return store + + +def _bg_manager(context: ContextTypes.DEFAULT_TYPE) -> BackgroundTaskManager: + mgr = getattr(context.application, "_runtime_bg_manager", None) + if not isinstance(mgr, BackgroundTaskManager): + raise RuntimeError("Background task manager is not configured") + return mgr + + +def _session_label(update: Update) -> str: + chat = update.effective_chat + if chat is None: + return "Telegram" + if chat.type == "private": + username = getattr(chat, "username", None) + if username: + return f"Telegram DM @{username}" + 
first_name = getattr(chat, "first_name", None) or "" + last_name = getattr(chat, "last_name", None) or "" + full_name = " ".join(part for part in [first_name, last_name] if part).strip() + return f"Telegram DM {full_name}" if full_name else "Telegram DM" + title = getattr(chat, "title", None) or f"chat {chat.id}" + if _effective_topic_thread_id(update): + return f"Telegram topic {title}" + return f"Telegram group {title}" + + +def _current_thread(update: Update, context: ContextTypes.DEFAULT_TYPE) -> dict | None: + return _store(context).get_session_thread(_session_key(update)) + + +def _current_lock(update: Update, context: ContextTypes.DEFAULT_TYPE): + thread = _current_thread(update, context) + if thread is None: + return None + return _store(context).lock(thread["id"]) + + +def _get_selection(update: Update, context: ContextTypes.DEFAULT_TYPE) -> ModelSelection: + thread = _current_thread(update, context) + if thread is None: + return default_selection() + return resolve_selection(model=thread.get("model"), provider=thread.get("provider")) + + +def _ensure_thread( + update: Update, + context: ContextTypes.DEFAULT_TYPE, + *, + selection: ModelSelection | None = None, +) -> dict: + resolved_selection = selection or _get_selection(update, context) + return _store(context).get_or_create_session_thread( + session_key=_session_key(update), + title="New chat", + model=resolved_selection.model, + provider=resolved_selection.provider, + source="telegram", + session_label=_session_label(update), + ) + + +def _apply_selection(update: Update, context: ContextTypes.DEFAULT_TYPE, selection: ModelSelection) -> None: + thread = _ensure_thread(update, context, selection=selection) + if thread.get("model") == selection.model and thread.get("provider") == selection.provider: + return + _store(context).update_thread(thread["id"], model=selection.model, provider=selection.provider) + + +def _skill_menu_markup() -> InlineKeyboardMarkup: + return InlineKeyboardMarkup( + [ + 
[InlineKeyboardButton(label, callback_data=f"{SKILL_MENU_CALLBACK_PREFIX}{skill_name}")] + for skill_name, label in SKILL_MENU_ITEMS + ] + ) + + +def _render_skill_entry(skill_name: str) -> str: + skill_path = Path(settings.REPOS_DIR) / "code_anywhere" / ".agents" / "skills" / skill_name / "SKILL.md" + if not skill_path.exists(): + return f"❌ Skill `{skill_name}` is not available." + + body = skill_path.read_text("utf-8").strip() + return f"*Skill:* `{skill_name}`\n*Path:* `{skill_path}`\n\n```md\n{body}\n```" + + +async def cmd_skills(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None: + return + await message.reply_text( + "Choose a skill to inspect. The bot can also invoke matching skills during a run.", + reply_markup=_skill_menu_markup(), + ) + + +async def handle_skill_menu_callback(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + query = update.callback_query + if not query or not query.data: + return + await query.answer() + + skill_name = query.data.removeprefix(SKILL_MENU_CALLBACK_PREFIX) + rendered = _render_skill_entry(skill_name) + await _send_long(update, rendered) + + +async def cmd_start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None: + return + await message.reply_text(TELEGRAM_START_MESSAGE) + + +async def cmd_alias(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None or not message.text: + return + + args = message.text.split(maxsplit=2) + if len(args) < 3: + await message.reply_text(_format_user_aliases(context, update.effective_user), parse_mode="Markdown") + return + + alias_name = args[1].strip() + target_model = args[2].strip() + current = _get_selection(update, context) + + try: + selection = resolve_selection(model=target_model, current=current) + normalized_alias, _ = _set_user_model_alias(context, update.effective_user, 
alias_name, selection.ref) + except (ModelSelectionError, ValueError) as error: + await message.reply_text(f"❌ {error}") + return + + # Force PTB to flush bot_data to disk immediately + try: + await context.application.update_persistence() + except Exception: + logger.warning("Failed to flush persistence after alias set", exc_info=True) + + await message.reply_text( + f"Saved alias `{normalized_alias}` → `{selection.ref}`", + parse_mode="Markdown", + ) + + +async def cmd_alias_export(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + user = update.effective_user + if message is None or user is None: + return + + aliases = _user_aliases(context, user) + payload = { + "user_id": user.id, + "username": user.username or "", + "aliases": dict(sorted(aliases.items())), + } + data = json.dumps(payload, indent=2, sort_keys=True).encode("utf-8") + export_name = f"aliases-{user.id}.json" + await message.reply_document( + document=InputFile(BytesIO(data), filename=export_name), + caption=f"Alias backup with {len(aliases)} entr{'y' if len(aliases) == 1 else 'ies'}.", + ) + + +async def cmd_model(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None or not message.text: + return + + args = message.text.split(maxsplit=1) + current = _get_selection(update, context) + if len(args) < 2: + await message.reply_text( + ( + f"{format_selection(current)}\n" + "Usage: /model gpt-5.4-mini\n" + "Usage: /model openrouter:anthropic/claude-sonnet-4.5" + ), + parse_mode="Markdown", + ) + return + requested_model = args[1].strip() + resolved_model, alias_name = _resolve_user_model_alias(context, update.effective_user, requested_model) + try: + selection = resolve_selection(model=resolved_model, current=current) + except ModelSelectionError as error: + await message.reply_text(f"❌ {error}") + return + + _apply_selection(update, context, selection) + alias_note = f" via alias 
`{alias_name}`" if alias_name else "" + await message.reply_text( + f"Switched to provider `{selection.provider}` with model `{selection.model}`{alias_note}", parse_mode="Markdown" + ) + + +async def cmd_provider(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None or not message.text: + return + + current = _get_selection(update, context) + args = message.text.split(maxsplit=1) + if len(args) < 2: + await message.reply_text( + (f"Current provider: `{current.provider}`\nUsage: /provider openai\nUsage: /provider openrouter"), + parse_mode="Markdown", + ) + return + + try: + selection = resolve_selection(model=current.model, provider=args[1].strip(), current=current) + except ModelSelectionError as error: + await message.reply_text(f"❌ {error}") + return + + _apply_selection(update, context, selection) + await message.reply_text( + f"Provider switched to `{selection.provider}`. Active model: `{selection.model}`", parse_mode="Markdown" + ) + + +async def cmd_models(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None or not message.text: + return + args = message.text.split(maxsplit=1) + requested_provider = args[1].strip() if len(args) > 1 else None + + try: + rendered = format_known_models(current=_get_selection(update, context), provider=requested_provider) + except ModelSelectionError as error: + await message.reply_text(f"❌ {error}") + return + + await message.reply_text(rendered, parse_mode="Markdown") + + +async def cmd_system(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None or message.text is None: + return + lock = _current_lock(update, context) + if lock is not None and lock.locked(): + await message.reply_text(busy_message(surface="telegram")) + return + + store = _store(context) + args = message.text.split(maxsplit=1) + if len(args) < 2 or not 
args[1].strip(): + current = store.get_topic_system_prompt(_session_key(update)) + if not current: + await message.reply_text("No persistent system instruction is saved for this chat/topic.") + return + await _send_long( + update, "Current persistent system instruction:\n\n" + _format_system_prompt(current["prompt"]) + ) + return + + raw_value = args[1].strip() + if raw_value.lower() == "clear": + cleared = store.clear_topic_system_prompt(_session_key(update)) + if cleared: + await message.reply_text("Cleared the persistent system instruction for this chat/topic.") + else: + await message.reply_text("No persistent system instruction was set for this chat/topic.") + return + + try: + normalized_prompt = _normalize_system_prompt_value(raw_value) + except ValueError as error: + await message.reply_text(f"❌ {error}") + return + + store.set_topic_system_prompt(_session_key(update), normalized_prompt) + await _send_long( + update, + "Saved persistent system instruction for this chat/topic. It will be reused after /new.\n\n" + + _format_system_prompt(normalized_prompt), + ) + + +async def cmd_new(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None: + return + + selection = _get_selection(update, context) + _store(context).start_new_session_thread( + session_key=_session_key(update), + title="New chat", + model=selection.model, + provider=selection.provider, + source="telegram", + session_label=_session_label(update), + ) + await message.reply_text("Started a new session. 
Older sessions stay available in the web UI.") + + +async def cmd_current(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None: + return + await message.reply_text(format_selection(_get_selection(update, context)), parse_mode="Markdown") + + +async def cmd_status(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None: + return + thread = _current_thread(update, context) + if thread is None: + await message.reply_text("No active session in this chat.") + return + + store = _store(context) + bg_manager = _bg_manager(context) + active_runs = _active_runs(context) + current_run = active_runs.get(thread["id"]) + all_threads = store.list_threads(limit=500) + + current_session_key = _session_key(update) + current_label = thread.get("session_label") or _session_label(update) + active_thread_ids = set(active_runs.keys()) + active_threads = [item for item in all_threads if item.get("id") in active_thread_ids] + current_chat_threads = [item for item in all_threads if item.get("session_key") == current_session_key] + recent_threads = all_threads[:5] + + status_lines = [ + "📊 *System Status*", + f"Active runs now: `{len(active_runs)}`", + f"Saved sessions: `{len(all_threads)}`", + f"Current session: *{helpers.escape_markdown(str(current_label), version=2)}*", + f"Current model: `{helpers.escape_markdown(thread.get('model') or 'unknown', version=2)}` via `{helpers.escape_markdown(thread.get('provider') or 'unknown', version=2)}`", + f"Messages in current session: `{len(thread.get('messages', []))}`", + f"Current chat/topic sessions: `{len(current_chat_threads)}`", + ] + + if current_run: + started_by = str(current_run.get("started_by") or "").strip() + if started_by: + preview_source = started_by.replace(chr(10), " ") + preview = preview_source[:120] + if len(preview_source) > 120: + preview += "..." 
+ status_lines.append(f"Current run prompt: _{helpers.escape_markdown(preview, version=2)}_") + status_lines.append("Current session state: `busy`") + else: + status_lines.append("Current session state: `idle`") + + status_lines.extend( + [ + "", + "🏃 *Active Runs*", + f"Concurrent active runs: `{len(active_threads)}`", + ] + ) + + if active_threads: + for item in active_threads[:5]: + title = item.get("title") or item.get("session_label") or item.get("id") or "Untitled" + marker = " (here)" if item.get("id") == thread["id"] else "" + status_lines.append(f"• {helpers.escape_markdown(str(title), version=2)}{marker}") + if len(active_threads) > 5: + status_lines.append(f"• _and {len(active_threads) - 5} more active run(s)_") + else: + status_lines.append("No active runs.") + + status_lines.extend( + [ + "", + "🕒 *Background Tasks*", + helpers.escape_markdown(bg_manager.format_status(thread["id"]), version=2), + ] + ) + + latest_bg = bg_manager.get_latest(thread["id"]) + if latest_bg and latest_bg.started_at: + status_lines.append(f"Last background update: `{latest_bg.started_at.strftime('%Y-%m-%d %H:%M:%SZ')}`") + + if recent_threads: + status_lines.extend( + [ + "", + "🗂️ *Recent Sessions*", + ] + ) + for item in recent_threads: + title = item.get("title") or item.get("session_label") or item.get("id") or "Untitled" + updated_at = str(item.get("updated_at") or "") + marker = " (here)" if item.get("id") == thread["id"] else "" + line = f"• {helpers.escape_markdown(str(title), version=2)}" + if updated_at: + line += f" — `{helpers.escape_markdown(updated_at, version=2)}`" + line += marker + status_lines.append(line) + + await message.reply_text("\n".join(status_lines), parse_mode="MarkdownV2") + + +async def cmd_usage(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None: + return + thread = _current_thread(update, context) + if thread is None: + await message.reply_text("No usage yet in this chat.") + 
return + usage = thread.get("usage") if isinstance(thread, dict) else None + summary = summarize_usage(usage) + if not summary: + await message.reply_text("No usage recorded yet in this chat.") + return + + lines = [ + f"Usage so far: {summary['total_tokens']:,} tok across {summary['request_count']} request(s)", + f"Input: {summary['prompt_tokens']:,} · Output: {summary['completion_tokens']:,}", + ] + if summary["cached_tokens"]: + lines.append(f"Cached: {summary['cached_tokens']:,}") + if summary["reasoning_tokens"]: + lines.append(f"Reasoning: {summary['reasoning_tokens']:,}") + lines.append(f"Cost: ${format_cost_value(summary['cost_usd'])}") + await message.reply_text("\n".join(lines)) + + +async def cmd_learnings(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Show project and global learnings for this thread.""" + message = update.effective_message + if message is None: + return + + store = _store(context) + thread = _current_thread(update, context) + + lines = [] + + # Global learnings + global_learnings = store.get_global_learnings() + if global_learnings: + lines.append("🌐 *Global Learnings*") + for item in global_learnings: + cat = helpers.escape_markdown(str(item.get("category", "general")), version=2) + fact = helpers.escape_markdown(str(item.get("fact", "")), version=2) + lines.append(f" \\[{cat}] {fact}") + + # Project learnings + if thread: + project_learnings = store.get_project_learnings(thread["id"]) + if project_learnings: + lines.append("") + lines.append("📁 *Project Learnings*") + for item in project_learnings: + cat = helpers.escape_markdown(str(item.get("category", "general")), version=2) + fact = helpers.escape_markdown(str(item.get("fact", "")), version=2) + lines.append(f" \\[{cat}] {fact}") + + if not lines: + await message.reply_text("No learnings recorded yet.") + return + + await message.reply_text("\n".join(lines), parse_mode="MarkdownV2") + + +async def cmd_stop(update: Update, context: ContextTypes.DEFAULT_TYPE) -> 
None: + message = update.effective_message + if message is None: + return + thread, run_state = _get_active_run(update, context) + if thread is None: + await message.reply_text("No active session in this chat.") + return + if not isinstance(run_state, dict): + await message.reply_text("Nothing is currently running here.") + return + stop_event = run_state.get("stop_event") + if stop_event is None: + await message.reply_text("This run cannot be stopped cleanly.") + return + stop_event.set() + await message.reply_text("Stopping the current run…") + + +async def handle_prompt_callback(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + query = update.callback_query + await query.answer() + + data = query.data.split(":", 2) + if len(data) < 3 or data[0] != "prompt_app": + return + + approved = data[1] == "yes" + session_key = data[2] + + pending = context.application.bot_data.get("pending_prompts", {}) + new_prompt = pending.get(session_key) + + if not new_prompt: + try: + await query.edit_message_text("❌ Error: Pending prompt not found or expired.") + except (RetryAfter, TelegramError): + pass + return + + if approved: + store = _store(context) + try: + store.set_topic_system_prompt(session_key, new_prompt) + except ValueError as error: + try: + await query.edit_message_text(f"❌ Error: {error}") + except (RetryAfter, TelegramError): + pass + else: + try: + await query.edit_message_text("✅ System prompt updated successfully!") + except (RetryAfter, TelegramError): + pass + else: + try: + await query.edit_message_text("❌ System prompt change declined.") + except (RetryAfter, TelegramError): + pass + + # Cleanup pending + pending.pop(session_key, None) + context.application.bot_data["pending_prompts"] = pending + + +async def cmd_topic(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + message = update.effective_message + if message is None or not message.text: + return + chat = update.effective_chat + + args = message.text.split(maxsplit=1) + if 
len(args) < 2: + await message.reply_text("Usage: /topic New topic name") + return + + if chat is None or not getattr(chat, "is_forum", False) or not getattr(message, "is_topic_message", False): + await message.reply_text("❌ This command only works inside a forum topic") + return + + try: + topic_name = _normalize_topic_name(args[1]) + except ValueError as error: + await message.reply_text(f"❌ {error}") + return + + try: + thread = _ensure_thread(update, context) + except RuntimeError as error: + await message.reply_text(f"❌ {error}") + return + + try: + await _rename_forum_topic(update, topic_name) + except (RuntimeError, TelegramError) as error: + await message.reply_text(f"❌ {error}") + return + + _store(context).update_thread(thread["id"], title=topic_name, title_source="manual") + await message.reply_text(f"Renamed this topic to: {topic_name}") + + +async def cmd_newtopic(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Create a new forum topic in the current group.""" + message = update.effective_message + if message is None or not message.text: + return + chat = update.effective_chat + + args = message.text.split(maxsplit=1) + if len(args) < 2: + await message.reply_text("Usage: /newtopic Topic name") + return + + if chat is None or not getattr(chat, "is_forum", False): + await message.reply_text("❌ This command only works in a forum supergroup") + return + + try: + topic_name = _normalize_topic_name(args[1]) + except ValueError as error: + await message.reply_text(f"❌ {error}") + return + + try: + forum_topic = await chat.create_forum_topic(name=topic_name) + thread_id = int(forum_topic.message_thread_id) + selection = _get_selection(update, context) + store = _store(context) + store.get_or_create_session_thread( + session_key=_topic_session_key(chat.id, thread_id), + title=topic_name, + model=selection.model, + provider=selection.provider, + source="telegram", + session_label=f"Telegram topic {topic_name}", + ) + await 
message.reply_text(f"Created new topic: {topic_name}") + except TelegramError as error: + await message.reply_text(f"❌ Failed to create topic: {error}") + except RuntimeError as error: + await message.reply_text(f"❌ {error}") + + +def _build_user_parts(text: str, uploads: list[dict]) -> list[dict]: + parts: list[dict] = [] + if text: + parts.append({"type": "input_text", "text": text}) + + for upload in uploads: + parts.append( + { + "type": "input_image", + "file_id": upload["id"], + "name": upload.get("name") or "image", + "mime_type": upload.get("mime_type") or "image/jpeg", + "detail": "auto", + } + ) + + return parts + + +def _build_background_tools( + update: Update, + context: ContextTypes.DEFAULT_TYPE, + thread: dict, + selection: ModelSelection, + system_message: str, +) -> list[Any]: + """Build tools that let the LLM spawn and query background agents.""" + store = _store(context) + bg = _bg_manager(context) + bot = context.bot + chat_id = update.effective_chat.id + tg_thread_id = _effective_topic_thread_id(update) + is_general = _is_general_topic(update) + + class StartBgParams(BaseModel): + task: str = Field(description="Detailed description of what the background agent should do.") + + async def _start_bg_handler(params: StartBgParams, invocation: ToolInvocation) -> str: + active = bg.get_active(thread["id"]) + if active: + return f"A background agent is already running: {active.description}. Wait for it to finish first." 
+ + async def _on_complete(bg_task) -> None: + output = bg_task.result or f"Background agent failed: {bg_task.error}" + store.add_message(thread["id"], "assistant", f"[Background agent]\n\n{output}") + # Track background agent usage + if bg_task.usage: + store.add_usage(thread["id"], bg_task.usage) + kwargs: dict[str, Any] = {} + if tg_thread_id and not is_general: + kwargs["message_thread_id"] = tg_thread_id + # Split long results across messages + prefix = "🔄 Background agent finished:\n\n" + for i in range(0, len(output), TG_MESSAGE_LIMIT - len(prefix)): + chunk = output[i : i + TG_MESSAGE_LIMIT - len(prefix)] + text = (prefix + chunk) if i == 0 else chunk + await bot.send_message(chat_id, text, **kwargs) + + bg.start( + thread_id=thread["id"], + description=params.task, + selection=selection, + system_message=system_message, + on_complete=_on_complete, + ) + return "Background agent started. Results will be sent to this chat when done." + + class CheckStatusParams(BaseModel): + pass + + async def _check_status_handler(params: CheckStatusParams, invocation: ToolInvocation) -> str: + return bg.format_status(thread["id"]) + + start_tool = define_tool( + "ask_agent", + description=( + "Start a background agent for a long-running task (research, multi-file changes, deep investigation). " + "The agent runs in a separate session and posts results to the chat when done. " + "Only one background agent per thread at a time." + ), + handler=_start_bg_handler, + params_type=StartBgParams, + ) + status_tool = define_tool( + "check_agent", + description="Check the status of the background agent in this thread.", + handler=_check_status_handler, + params_type=CheckStatusParams, + ) + return [start_tool, status_tool] + + +# Module-level backoff tracker: tracks the earliest time we're allowed to +# edit a Telegram message after hitting flood control. 
+_flood_backoff_until: float = 0.0 + + +async def _edit_status_message(message: Any, text: str) -> None: + """Edit a Telegram status message, respecting flood-control backoff.""" + global _flood_backoff_until + loop = asyncio.get_running_loop() + now = loop.time() + + # If we're still in a backoff window, skip this edit entirely. + if now < _flood_backoff_until: + return + + try: + await message.edit_text(text) + except RetryAfter as exc: + # Telegram says "retry in N seconds" — honour it and add a small buffer. + wait = max(float(exc.retry_after), 1.0) + 0.5 + _flood_backoff_until = loop.time() + wait + logger.warning("Telegram flood control: backing off %.1fs", wait) + except TelegramError: + pass + + +async def _run_user_turn( + update: Update, + context: ContextTypes.DEFAULT_TYPE, + *, + text: str, + uploads: list[dict] | None = None, +) -> None: + message = update.effective_message + if message is None: + return + + normalized_text = str(text or "").strip() + normalized_uploads = uploads or [] + if not normalized_text and not normalized_uploads: + return + + # Resolve user identity (T019) + from user_store import get_store as _get_user_store + + tg_user = update.effective_user + if tg_user: + user_store = _get_user_store() + current_user = user_store.resolve_or_create_user( + provider="telegram", + external_id=str(tg_user.id), + display_name=tg_user.full_name or str(tg_user.id), + ) + else: + current_user = None + + store = _store(context) + selection = _get_selection(update, context) + thread = _ensure_thread(update, context, selection=selection) + lock = store.lock(thread["id"]) + if lock.locked(): + await message.reply_text(busy_message(surface="telegram")) + return + + async with lock: + thinking = await message.reply_text(working_message(surface="telegram")) + latest_status = working_message(surface="telegram") + tool_counts: dict[str, int] = {} + stop_status = asyncio.Event() + stop_run = asyncio.Event() + run_state = {"stop_event": stop_run, 
"status_message": thinking, "started_by": normalized_text} + _register_active_run(context, thread["id"], run_state) + + # Grab the running session cost so far (before this turn) + try: + _pre_usage = store.get_total_usage(thread["id"]) + except Exception: + _pre_usage = {} + _pre_cost = Decimal(str(_pre_usage.get("cost_usd", "0") or "0")) + _pre_tokens = int(_pre_usage.get("total_tokens", 0) or 0) + + def _cost_line() -> str: + """Format session cost line for the status bubble. + + Computes a *live* running cost from collected_events so far + (which accumulate ASSISTANT_USAGE events during streaming), + adds it to the pre-existing session cost, and returns the + combined figure. This keeps the 💰 line fresh as tool calls + stream in, rather than showing a stale number. + """ + try: + turn_usage = extract_usage_and_cost( + selection.model, + selection.provider, + collected_events, + ) + except Exception: + turn_usage = {} + turn_cost = Decimal(str(turn_usage.get("cost_usd", "0") or "0")) + turn_tokens = int(turn_usage.get("total_tokens", 0) or 0) + cost = _pre_cost + turn_cost + tokens = _pre_tokens + turn_tokens + if not tokens and not cost: + return "" + return f"💰 Session: ${cost:.8f} · {tokens:,} tok" + + status_changed = asyncio.Event() + pending_statuses: list[str] = [] + pending_status_keys: set[str] = set() + turn_started_at = asyncio.get_running_loop().time() + + def _queue_status(text: str | None) -> None: + nonlocal latest_status + + normalized = str(text or "").strip() + if not normalized: + return + + latest_status = normalized + dedupe_key = normalized.casefold() + if dedupe_key in pending_status_keys: + return + + pending_status_keys.add(dedupe_key) + pending_statuses.append(normalized) + status_changed.set() + + def _drain_status_batch() -> str: + if not pending_statuses: + return latest_status + + batch: str = "...\n".join(pending_statuses) + pending_statuses.clear() + pending_status_keys.clear() + return batch + + async def _status_loop() -> None: + 
"""Edit the Telegram placeholder when status changes, throttled to avoid rate-limits.""" + last_sent = "" + last_edit = 0.0 + last_elapsed_prefix = "" + loop = asyncio.get_running_loop() + while not stop_status.is_set(): + if not pending_statuses: + status_changed.clear() + status_wait = asyncio.create_task(status_changed.wait()) + stop_wait = asyncio.create_task(stop_status.wait()) + pending_waits = {status_wait, stop_wait} + wait_timeout = ( + None if last_edit == 0.0 else max(0.0, TG_STATUS_EDIT_INTERVAL - (loop.time() - last_edit)) + ) + try: + done, _ = await asyncio.wait( + pending_waits, + timeout=wait_timeout, + return_when=asyncio.FIRST_COMPLETED, + ) + if stop_wait in done or stop_status.is_set(): + break + finally: + for task in pending_waits: + if not task.done(): + task.cancel() + await asyncio.gather(*pending_waits, return_exceptions=True) + + elapsed = loop.time() - last_edit + if pending_statuses and last_edit and elapsed < TG_STATUS_EDIT_INTERVAL: + try: + await asyncio.wait_for(stop_status.wait(), timeout=TG_STATUS_EDIT_INTERVAL - elapsed) + break + except asyncio.TimeoutError: + pass + + # If flood backoff is active, wait it out before attempting an edit. + backoff_remaining = _flood_backoff_until - loop.time() + if backoff_remaining > 0: + try: + await asyncio.wait_for(stop_status.wait(), timeout=backoff_remaining) + break + except asyncio.TimeoutError: + pass + + display = format_tool_counts(tool_counts, current_status=_drain_status_batch()) + display = append_elapsed_status(display, elapsed_seconds=loop.time() - turn_started_at) + cost = _cost_line() + if cost: + display = f"{display}\n{cost}" if display else cost + + # Skip edit if only the elapsed timer changed (round to 10s to + # avoid trivial edits that trigger rate-limits during idle runs). + # Build a comparison key that ignores the exact seconds count. 
+ elapsed_now = int(loop.time() - turn_started_at) + elapsed_bucket = elapsed_now // 10 # only changes every 10s + content_key = (display.split("\n")[0] if display else "", cost, elapsed_bucket) + if content_key == last_elapsed_prefix and not pending_statuses: + continue + + if display and display != last_sent: + await _edit_status_message(thinking, display) + last_sent = display + last_edit = loop.time() + last_elapsed_prefix = content_key + + status_task = asyncio.create_task(_status_loop()) + try: + store.add_message( + thread["id"], + "user", + normalized_text, + parts=_build_user_parts(normalized_text, normalized_uploads), + ) + # Log to identity-linked conversation store (T024) + _us_session = None + if current_user: + _us = _get_user_store() + _topic = str(_effective_topic_thread_id(update) or "") + _us_session = _us.get_or_create_session(current_user.id, "telegram", topic_id=_topic or None) + _us.log_message(_us_session.id, "user", normalized_text) + _queue_status(f"Inspecting request in {_session_label(update)}") + + history = store.build_agent_history(thread["id"], limit=MAX_HISTORY * 2) + prompt = format_prompt_with_history(history, normalized_text) + + # Build BlobAttachment dicts directly from the current Telegram uploads so + # the SDK receives the image bytes for this turn regardless of how prompt + # formatting collapses history into text. 
+ current_attachments: list[dict[str, Any]] | None = None + if normalized_uploads and not selection.likely_supports_vision: + _queue_status(f"⚠️ {selection.model} doesn't support images — sending text only") + normalized_uploads = [] + if normalized_uploads: + blob_attachments: list[dict[str, Any]] = [] + for upload in normalized_uploads: + try: + data_url = store._media_store.build_data_url(upload["id"]) + except KeyError: + continue + m = re.match(r"data:([^;]+);base64,(.+)", data_url, re.DOTALL) + if not m: + continue + blob_attachments.append( + { + "type": "blob", + "mimeType": m.group(1), + "data": m.group(2), + "displayName": str(upload.get("name") or "image.jpg"), + } + ) + if blob_attachments: + current_attachments = blob_attachments + + topic_tools = _build_telegram_topic_tools(update, context) + system_message = _compose_system_message(update, context, user=current_user) + + bg_tools = _build_background_tools(update, context, thread, selection, system_message) + bg_context = _bg_manager(context).context_summary(thread["id"]) + if bg_context: + system_message = _compose_system_message( + update, context, background_summary=bg_context, user=current_user + ) + + all_tools = ( + (topic_tools or []) + + bg_tools + + build_integration_tools(current_user, thread_id=thread["id"], system_prompt=system_message) + ) + + latest_status = "Thinking ..." 
+ collected_events: list = [] + + async for event in stream_session( + copilot, + model=selection.model, + provider_config=build_provider_config(selection), + system_message=system_message, + prompt=prompt, + tools=all_tools or None, + attachments=current_attachments, + thread_id=thread["id"], + ): + if stop_run.is_set(): + raise asyncio.CancelledError("Stopped by /stop") + + collected_events.append(event) + + # Handle session errors + if event.type == SessionEventType.SESSION_ERROR: + error_msg: str = ( + event.data.message + if event.data and isinstance(event.data.message, str) + else "Unknown session error" + ) + raise RuntimeError(f"Session error: {error_msg}") + + # Track tool calls for collapsed count display + trace = stream_trace_event(event) + if trace: + tool_name = trace.get("tool_name") or "" + category = trace.get("category", "") + if category == "tool_call" and tool_name: + # Don't count report_intent in tool counts + if tool_name != "report_intent" and not trace.get("output_detail"): + tool_counts[tool_name] = tool_counts.get(tool_name, 0) + 1 + elif category == "subagent": + tool_counts["handoffs"] = tool_counts.get("handoffs", 0) + 1 + + # Update current status text from stream + status_updates = stream_status_updates(event) + if status_updates: + for status_text in status_updates: + _queue_status(status_text) + elif trace: + status_changed.set() + + _queue_status("Sending final reply") + reply = extract_final_text(collected_events) + if not reply: + reply = ( + "I finished that run but did not get a usable final reply back. " + "Please send the request again, or narrow it to one repo, file, or command." 
+ ) + usage = extract_usage_and_cost( + selection.model, selection.provider, collected_events, + advisor_usage=_get_advisor_usage(thread["id"]), + ) + total_usage = store.add_usage(thread["id"], usage) + cost_footer = "" + if total_usage: + total_cost = total_usage.get("cost_usd", "0") + total_tokens = total_usage.get("total_tokens", 0) + cost_footer = f"\n\n---\nTotal Session Cost: ${total_cost}\nTotal Session Tokens: {total_tokens:,}" + + stop_status.set() + # await safe_delete_message(thinking) + store.add_message(thread["id"], "assistant", reply) + if _us_session and current_user: + _get_user_store().log_message(_us_session.id, "assistant", reply) + + await _send_long(update, reply + cost_footer) + except asyncio.CancelledError: + stop_status.set() + # await safe_delete_message(thinking) + partial_usage = extract_usage_and_cost( + selection.model, selection.provider, collected_events, + advisor_usage=_get_advisor_usage(thread["id"]), + ) + if ( + partial_usage.get("request_count") + or partial_usage.get("total_tokens") + or partial_usage.get("cost_usd") != "0" + ): + store.add_usage(thread["id"], partial_usage) + await _send_long(update, "Stopped current run.") + except Exception as error: + logger.exception("Agent error for session %s", _session_key(update)) + stop_status.set() + # await safe_delete_message(thinking) + detail = format_session_error(surface="telegram", error=error) + store.add_message(thread["id"], "assistant", detail) + await _send_long(update, detail) + finally: + _clear_active_run(context, thread["id"], run_state) + stop_status.set() + + # Extract learnings from this turn (runs even on error so user + # facts are never lost — assistant_message may be empty on failure) + try: + assistant_msg = reply if "reply" in locals() else "" + project_learnings, global_learnings = extract_learnings_from_turn( + normalized_text, + assistant_msg, + ) + for fact, category in project_learnings: + store.add_project_learning(thread["id"], fact, 
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Route a plain-text Telegram message into the shared user-turn pipeline."""
    message = update.effective_message
    if message is None or not message.text:
        logger.info("Ignoring non-text Telegram update: %s", update)
        return

    await _run_user_turn(update, context, text=message.text)


async def handle_photo(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Download the largest photo rendition, persist it, then run a user turn.

    The photo caption (possibly empty) becomes the turn's text; the stored
    upload record is forwarded so the agent can see the image bytes.
    """
    message = update.effective_message
    if message is None or not message.photo:
        logger.info("Ignoring Telegram photo update without image payload: %s", update)
        return

    try:
        # Telegram sorts photo sizes ascending; the last entry is the largest.
        largest = message.photo[-1]
        tg_file = await largest.get_file()
        raw = bytes(await tg_file.download_as_bytearray())
        upload = _store(context).save_upload(
            name=f"telegram-photo-{message.message_id}.jpg",
            data=raw,
            mime_type="image/jpeg",
        )
    except Exception as error:
        logger.exception("Failed to download Telegram photo for session %s", _session_key(update))
        await message.reply_text(format_session_error(surface="telegram", error=error))
        return

    await _run_user_turn(update, context, text=message.caption or "", uploads=[upload])
async def handle_error(update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Top-level PTB error handler — log the exception with its traceback."""
    logger.exception("Telegram application error", exc_info=context.error)


def _telegram_persistence_path() -> Path:
    """Resolve the pickle file used for python-telegram-bot persistence.

    Prefers TG_PERSISTENCE_DIR; falls back to the general DATA_DIR.
    """
    root = settings.TG_PERSISTENCE_DIR or settings.DATA_DIR
    return Path(root).expanduser() / "telegram-bot-state.pkl"
+ app._runtime_store = store # type: ignore[attr-defined] + app._runtime_bg_manager = BackgroundTaskManager() # type: ignore[attr-defined] + + # Restore aliases from durable JSON file if bot_data lost them + file_aliases = _load_alias_file() + if file_aliases: + bd_aliases = app.bot_data.get(_USER_MODEL_ALIASES_KEY) + if not isinstance(bd_aliases, dict) or not bd_aliases: + app.bot_data[_USER_MODEL_ALIASES_KEY] = file_aliases + logger.info("Restored %d alias owner(s) from JSON file", len(file_aliases)) + else: + # Merge: file fills in missing owners/keys + for owner, owner_aliases in file_aliases.items(): + if owner not in bd_aliases: + bd_aliases[owner] = owner_aliases + else: + for k, v in owner_aliases.items(): + bd_aliases.setdefault(k, v) + app.bot_data[_USER_MODEL_ALIASES_KEY] = bd_aliases + + await _post_init(app) + + app = ( + Application.builder() + .token(settings.TG_BOT_TOKEN) + .persistence(persistence) + .post_init(_combined_post_init) + .build() + ) + app.add_handler(TypeHandler(Update, _gate_telegram_access), group=-1) + app.add_handler(CommandHandler(["start", "help"], cmd_start)) + app.add_handler(CommandHandler("models", cmd_models)) + app.add_handler(CommandHandler("alias", cmd_alias)) + app.add_handler(CommandHandler("alias_export", cmd_alias_export)) + app.add_handler(CommandHandler("model", cmd_model)) + app.add_handler(CommandHandler("provider", cmd_provider)) + app.add_handler(CommandHandler(["topic", "rename"], cmd_topic)) + app.add_handler(CommandHandler("newtopic", cmd_newtopic)) + app.add_handler(CommandHandler("skills", cmd_skills)) + app.add_handler(CommandHandler("system", cmd_system)) + app.add_handler(CommandHandler("new", cmd_new)) + app.add_handler(CommandHandler("current", cmd_current)) + app.add_handler(CommandHandler("status", cmd_status)) + app.add_handler(CommandHandler("usage", cmd_usage)) + app.add_handler(CommandHandler("learnings", cmd_learnings)) + app.add_handler(CommandHandler("stop", cmd_stop)) + 
def webhook_secret() -> str:
    """Derive a stable webhook secret: first 32 hex chars of SHA-256(bot token)."""
    digest = hashlib.sha256(settings.TG_BOT_TOKEN.encode()).hexdigest()
    return digest[:32]


async def _post_init(app: Application) -> None:
    """Register the bot command menu, replacing stale entries in every scope."""
    commands = [
        BotCommand("start", "Show help"),
        BotCommand("help", "Show help"),
        BotCommand("models", "List model IDs"),
        BotCommand("alias", "Save personal model alias"),
        BotCommand("alias_export", "Download personal alias backup"),
        BotCommand("model", "Switch model"),
        BotCommand("provider", "Switch provider"),
        BotCommand("topic", "Rename current topic"),
        BotCommand("newtopic", "Create a new forum topic"),
        BotCommand("skills", "Browse available skills"),
        BotCommand("system", "Set or show topic instruction"),
        BotCommand("new", "Start a new session"),
        BotCommand("current", "Show current model"),
        BotCommand("status", "Show system and task status"),
        BotCommand("usage", "Show accumulated usage"),
        BotCommand("learnings", "Show project and global learnings"),
        BotCommand("stop", "Stop the current run"),
    ]
    scopes = (
        BotCommandScopeDefault(),
        BotCommandScopeAllGroupChats(),
        BotCommandScopeAllChatAdministrators(),
    )
    # Clear stale commands for all scopes, then re-register
    for scope in scopes:
        await app.bot.delete_my_commands(scope=scope)
        await app.bot.set_my_commands(commands, scope=scope)
class UserContext:
    """Per-request context that lazily decrypts user credentials.

    Credentials are decrypted on first access via ``get_credential()``,
    cached for the duration of one request, and discarded when the object
    goes out of scope. Expired credentials return ``None`` to signal
    re-provisioning.
    """

    def __init__(self, user: Any, store: Any) -> None:
        from user_store import User  # deferred to avoid circular import

        self.user: User = user
        self._store = store
        # service name -> decrypted token, or None for missing/expired/broken
        self._cache: dict[str, str | None] = {}

    def get_credential(self, service: str) -> str | None:
        """Return the decrypted API token for *service*, or None.

        Returns None when no credential exists, the credential has expired,
        or decryption fails. A successful lookup also touches the
        credential's ``last_used_at`` timestamp.
        """
        if service in self._cache:
            return self._cache[service]

        from user_store import decrypt

        cred = self._store.get_credential(self.user.id, service)
        if cred is None:
            self._cache[service] = None
            return None
        # Check expiry
        if cred.expires_at:
            try:
                exp = datetime.fromisoformat(cred.expires_at)
                # BUG FIX: fromisoformat happily parses naive timestamps, and
                # comparing a naive datetime with the aware "now" raises
                # TypeError (not ValueError), crashing the lookup. Interpret
                # naive expiries as UTC before comparing.
                if exp.tzinfo is None:
                    exp = exp.replace(tzinfo=timezone.utc)
                if exp < datetime.now(timezone.utc):
                    logger.warning(
                        "Credential for %s/%s expired at %s",
                        self.user.id,
                        service,
                        cred.expires_at,
                    )
                    self._cache[service] = None
                    return None
            except ValueError:
                pass  # ignore unparseable expiry — treat as valid
        try:
            token = decrypt(cred.encrypted_token)
        except Exception:
            logger.exception("Failed to decrypt credential for %s/%s", self.user.id, service)
            self._cache[service] = None
            return None
        self._cache[service] = token
        # Touch last_used_at
        self._store.touch_credential(self.user.id, service)
        return token
def build_integration_tools(
    user: Any | None = None,
    *,
    thread_id: str | None = None,
    system_prompt: str | None = None,
) -> list:
    """Build Copilot SDK tools for all active integrations.

    Always returns a list (possibly empty), never None.
    """
    active = active_toolset_names(user)
    if not active:
        return []

    ctx = build_user_context(user)
    # Turn metadata is smuggled to the tool factories under private keys.
    for key, value in (("_thread_id", thread_id), ("_system_prompt", system_prompt)):
        if value is not None:
            ctx[key] = value
    return tool_registry.get_tools(active, ctx)
def get_provisionable_services(user: Any) -> list[dict[str, str]]:
    """Return list of services that could be provisioned for *user*."""
    from provisioners.base import provisioner_registry
    from user_store import get_store

    # Services the user already holds credentials for count as "active".
    provisioned = get_store().get_credentials(user.id)
    return [
        {
            "service": name,
            "capabilities": ", ".join(prov.capabilities),
            "status": "active" if name in provisioned else "available",
        }
        for name, prov in provisioner_registry.available.items()
    ]
def build_capability_fragment(user: Any) -> str | None:
    """Return a system prompt fragment showing active/available capabilities (T036)."""
    services = get_provisionable_services(user)
    if not services:
        return None

    def _describe(entries: list[dict[str, str]]) -> str:
        # "capability (service)" pairs, comma separated.
        return ", ".join(f"{entry['capabilities']} ({entry['service']})" for entry in entries)

    active = [svc for svc in services if svc["status"] == "active"]
    available = [svc for svc in services if svc["status"] == "available"]

    lines: list[str] = []
    if active:
        lines.append("Active services: " + _describe(active))
    if available:
        lines.append("Available (say 'set up ' to activate): " + _describe(available))
    return "\n".join(lines)
"vikunja_memory_path": settings.VIKUNJA_MEMORY_PATH, + "memory_owner": user.id, + "_user": user, # passed through for meta-tools + } + for service, key_map in _SERVICE_CONTEXT_KEYS.items(): + for ctx_key, source in key_map.items(): + if source == "__credential__": + val = uctx.get_credential(service) + else: + val = getattr(settings, source, "") + if val: + ctx[ctx_key] = val + return ctx diff --git a/tool_registry.py b/tool_registry.py new file mode 100644 index 0000000..76857ee --- /dev/null +++ b/tool_registry.py @@ -0,0 +1,211 @@ +"""Unified tool registry for the agent backend. + +A ToolSet groups related tools (e.g. "vikunja", "karakeep") with: + - a factory that builds Copilot SDK Tool objects given per-user context + - a system prompt fragment injected when the tool set is active + - an optional capability label (e.g. "tasks", "bookmarks") + +The registry collects tool sets and resolves the right tools + prompt +fragments for a given user at request time. +""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass, field +from typing import Any, Awaitable, Callable + +from copilot import define_tool +from copilot.tools import Tool, ToolInvocation +from pydantic import create_model + +logger = logging.getLogger(__name__) + + +# ── ToolSet ────────────────────────────────────────────────────── + +ToolFactory = Callable[[dict[str, Any]], list[Tool]] + + +@dataclass +class ToolSet: + """A named collection of tools with a system prompt fragment. + + Attributes: + name: Unique identifier (e.g. "vikunja", "karakeep"). + description: Human-readable description displayed during onboarding. + capability: Abstract capability label (e.g. "tasks", "bookmarks"). + Multiple tool sets can share a capability, but a user + should only have one active per capability. + system_prompt_fragment: Text appended to the system message when + this tool set is active for the user. 
+ build_tools: Factory function that receives a user_context dict + (credentials, config) and returns Copilot SDK Tool + instances. The dict keys are tool-set-specific. + required_keys: Context keys that must be present for this tool set + to be usable (e.g. ["vikunja_api_url", "vikunja_api_key"]). + """ + + name: str + description: str + capability: str + system_prompt_fragment: str + build_tools: ToolFactory + required_keys: list[str] = field(default_factory=list) + + +# ── ToolRegistry ───────────────────────────────────────────────── + + +class ToolRegistry: + """Central registry of available tool sets.""" + + def __init__(self) -> None: + self._toolsets: dict[str, ToolSet] = {} + + def register(self, toolset: ToolSet) -> None: + if toolset.name in self._toolsets: + logger.warning("Replacing existing tool set %r", toolset.name) + self._toolsets[toolset.name] = toolset + logger.info( + "Registered tool set %r (%s, %d required keys)", + toolset.name, + toolset.capability, + len(toolset.required_keys), + ) + + @property + def available(self) -> dict[str, ToolSet]: + return dict(self._toolsets) + + def get_tools( + self, + active_names: list[str], + user_context: dict[str, Any], + ) -> list[Tool]: + """Build Copilot SDK tools for the requested tool sets. + + Skips tool sets whose required context keys are missing. 
+ """ + tools: list[Tool] = [] + for name in active_names: + ts = self._toolsets.get(name) + if ts is None: + logger.warning("Requested unknown tool set %r — skipped", name) + continue + missing = [k for k in ts.required_keys if not user_context.get(k)] + if missing: + logger.warning( + "Tool set %r skipped: missing context keys %s", + name, + missing, + ) + continue + try: + tools.extend(ts.build_tools(user_context)) + except Exception: + logger.exception("Failed to build tools for %r", name) + return tools + + def get_system_prompt_fragments(self, active_names: list[str]) -> list[str]: + """Return system prompt fragments for the given tool sets.""" + fragments: list[str] = [] + for name in active_names: + ts = self._toolsets.get(name) + if ts and ts.system_prompt_fragment: + fragments.append(ts.system_prompt_fragment) + return fragments + + +# ── OpenAI schema bridge ───────────────────────────────────────── + + +def _json_type_to_python(json_type: str) -> type: + """Map JSON Schema type strings to Python types for Pydantic.""" + mapping: dict[str, type] = { + "string": str, + "integer": int, + "number": float, + "boolean": bool, + "array": list, + "object": dict, + } + return mapping.get(json_type, str) + + +def openai_tools_to_copilot( + schemas: list[dict[str, Any]], + dispatcher: Callable[..., Awaitable[str]], + context_kwargs: dict[str, Any] | None = None, +) -> list[Tool]: + """Convert OpenAI function-calling tool schemas + a dispatcher into + Copilot SDK Tool objects. + + Args: + schemas: List of OpenAI tool dicts ({"type":"function","function":{...}}). + dispatcher: Async callable with signature + ``async def dispatcher(name, arguments, **context_kwargs) -> str``. + It receives the tool name, parsed argument dict, and any extra + keyword arguments from *context_kwargs*. + context_kwargs: Extra keyword arguments forwarded to every dispatcher + call (e.g. vikunja client, memory store). 
+ + Returns: + List of Copilot SDK Tool objects ready to pass to create_session(). + """ + extra = context_kwargs or {} + tools: list[Tool] = [] + + for schema in schemas: + func = schema.get("function", {}) + name: str = func.get("name", "") + description: str = func.get("description", "") + params_spec: dict = func.get("parameters", {}) + properties: dict = params_spec.get("properties", {}) + required_fields: list[str] = params_spec.get("required", []) + + if not name: + continue + + # Build Pydantic model fields dynamically + fields: dict[str, Any] = {} + for prop_name, prop_def in properties.items(): + py_type = _json_type_to_python(prop_def.get("type", "string")) + # All non-required fields are optional with None default + if prop_name in required_fields: + fields[prop_name] = (py_type, ...) + else: + fields[prop_name] = (py_type | None, None) + + # Create a unique Pydantic model class + model_name = f"Params_{name}" + params_model = create_model(model_name, **fields) # type: ignore[call-overload] + + # Capture loop variables in closure + _name = name + _extra = extra + + async def _handler( + params: Any, + invocation: ToolInvocation, + *, + _tool_name: str = _name, + _ctx: dict[str, Any] = _extra, + ) -> str: + args = params.model_dump(exclude_none=True) + return await dispatcher(_tool_name, args, **_ctx) + + tool = define_tool( + name=name, + description=description, + handler=_handler, + params_type=params_model, + ) + tools.append(tool) + + return tools + + +# ── Module-level singleton ─────────────────────────────────────── + +registry = ToolRegistry() diff --git a/tools/__init__.py b/tools/__init__.py new file mode 100644 index 0000000..6d45df7 --- /dev/null +++ b/tools/__init__.py @@ -0,0 +1,9 @@ +"""Unified tool package. + +Importing this module registers all available tool sets with the +global tool_registry.registry singleton via instance.register_tools(). 
+""" + +from instance import register_tools + +register_tools() diff --git a/tools/advisor/__init__.py b/tools/advisor/__init__.py new file mode 100644 index 0000000..f392940 --- /dev/null +++ b/tools/advisor/__init__.py @@ -0,0 +1,299 @@ +"""Advisor tool — escalate hard decisions to a stronger model. + +The executor can call this tool to consult a stronger advisor model. +The advisor returns guidance text only; it cannot call tools or emit +user-facing text directly. +""" + +from __future__ import annotations + +import logging +from typing import Any + +from openai import AsyncOpenAI + +from config import settings +from model_selection import ModelSelection, build_provider_config, resolve_selection +from tool_registry import ToolSet, openai_tools_to_copilot + +logger = logging.getLogger(__name__) + +# ── System prompt fragment (T004) ──────────────────────────────── + +ADVISOR_SYSTEM_PROMPT = ( + "You have an `advisor` tool backed by a stronger model. " + "Call it before committing to a non-trivial design choice, " + "before deleting code you don't understand, " + "when a test fails for a non-obvious reason, " + "or when you are about to loop. " + "Do not call it for typos, lint, or routine edits." +) + +# ── Tool schema (T003) ────────────────────────────────────────── + +ADVISOR_TOOL_SCHEMA: list[dict[str, Any]] = [ + { + "type": "function", + "function": { + "name": "advisor", + "description": ( + "Consult a stronger model for a plan, correction, or stop signal. " + "Call this when you are uncertain about architecture, root cause, or next step. " + "You get back guidance text only; no tool calls are executed by the advisor." 
def _check_and_increment(thread_id: str) -> str | None:
    """Bump the per-run advisor call count for *thread_id*.

    Returns an error message string once the configured cap has been hit
    (the call is NOT counted in that case), otherwise records the call
    and returns None.
    """
    limit = settings.ADVISOR_MAX_USES
    used_so_far = _usage_counter.get(thread_id, 0)
    if used_so_far < limit:
        _usage_counter[thread_id] = used_so_far + 1
        return None
    return f"Advisor limit reached (max {limit} per run). Proceed on your own."
def _build_advisor_prompt(
    question: str,
    context_summary: str,
    stakes: str,
    system_prompt_excerpt: str,
) -> list[dict[str, str]]:
    """Assemble the two-message (system + user) prompt for the advisor call.

    The executor's own system prompt is trimmed to at most
    ``_SYSTEM_PROMPT_MAX_CHARS`` characters (with an ellipsis appended)
    so it cannot dominate the advisor's context window.
    """
    excerpt = system_prompt_excerpt
    if len(excerpt) > _SYSTEM_PROMPT_MAX_CHARS:
        excerpt = excerpt[:_SYSTEM_PROMPT_MAX_CHARS] + "\u2026"

    system_chunks = [_ADVISOR_SYSTEM_INSTRUCTION]
    if excerpt:
        system_chunks.append(f"Executor context (trimmed):\n{excerpt}")

    user_chunks: list[str] = []
    if context_summary:
        user_chunks.append(f"Context: {context_summary}")
    user_chunks.append(f"Question [{stakes} stakes]: {question}")

    return [
        {"role": "system", "content": "\n\n".join(system_chunks)},
        {"role": "user", "content": "\n\n".join(user_chunks)},
    ]
async def _handle_advisor_call(
    arguments: dict[str, Any],
    thread_id: str,
    system_prompt_excerpt: str,
) -> str:
    """Handle an advisor tool invocation.

    Enforces the per-run usage cap, resolves the advisor model — honouring
    the per-thread ``_advisor_model_override`` injected by the tool factory —
    performs the one-shot completion, and records usage and trace data
    keyed by *thread_id*.

    Args:
        arguments: Tool-call arguments (question, context_summary, stakes,
            plus the internal ``_advisor_model_override`` key).
        thread_id: Conversation identifier used to key counters/traces.
        system_prompt_excerpt: Executor system prompt, trimmed into the
            advisor prompt for context.

    Returns:
        The advisor's guidance text, or a human-readable error string.
    """
    question = arguments.get("question", "")
    context_summary = arguments.get("context_summary", "")
    stakes = arguments.get("stakes", "medium")
    # Bug fix: _build_advisor_tools injects this override via setdefault(),
    # but it was previously never read, so per-thread model overrides were
    # silently ignored. Pop it so the internal key never leaks further.
    model_override = arguments.pop("_advisor_model_override", None)

    if not question:
        return "No question provided."

    # Enforce the per-run usage limit before doing any model work.
    limit_msg = _check_and_increment(thread_id)
    if limit_msg:
        return limit_msg

    # High-stakes questions get a larger budget and slightly more freedom.
    max_tokens = settings.ADVISOR_MAX_TOKENS
    temperature = 0.2
    if stakes == "high":
        max_tokens *= 2
        temperature = 0.4

    messages = _build_advisor_prompt(question, context_summary, stakes, system_prompt_excerpt)

    # Resolve advisor model (per-thread override or configured default).
    advisor_model_id = model_override or settings.ADVISOR_DEFAULT_MODEL
    try:
        advisor_selection = resolve_selection(model=advisor_model_id)
    except Exception as exc:
        logger.warning("Advisor model resolution failed: %s", exc)
        return f"Advisor model resolution failed ({exc}). Proceed with your best judgment."

    provider_config = build_provider_config(advisor_selection)

    response_text, usage_data = await _call_advisor_model(
        messages,
        advisor_selection.model,
        max_tokens,
        provider_config,
        temperature=temperature,
    )

    # Accumulate token usage for this thread (T015).
    existing = _advisor_usage.get(thread_id, {"prompt_tokens": 0, "completion_tokens": 0})
    existing["prompt_tokens"] = existing.get("prompt_tokens", 0) + usage_data["prompt_tokens"]
    existing["completion_tokens"] = existing.get("completion_tokens", 0) + usage_data["completion_tokens"]
    _advisor_usage[thread_id] = existing

    # Append a trace record for observability (T021).
    total_tokens = usage_data["prompt_tokens"] + usage_data["completion_tokens"]
    _advisor_traces.setdefault(thread_id, []).append(
        {
            "kind": "advisor",
            "question": question,
            "guidance": response_text,
            "model": advisor_model_id,
            "tokens": total_tokens,
        }
    )

    return response_text
user_context.get("advisor_model") + + async def dispatcher(name: str, arguments: dict, **_kw: Any) -> str: + if name == "advisor": + # Allow per-thread model override + if advisor_model_override: + arguments.setdefault("_advisor_model_override", advisor_model_override) + return await _handle_advisor_call(arguments, thread_id, system_prompt) + return f"Unknown advisor tool: {name}" + + return openai_tools_to_copilot(schemas=ADVISOR_TOOL_SCHEMA, dispatcher=dispatcher) + + +# ── ToolSet registration (T003) ────────────────────────────────── + +advisor_toolset = ToolSet( + name="advisor", + description="Consult a stronger model on hard decisions", + capability="advisor", + system_prompt_fragment=ADVISOR_SYSTEM_PROMPT, + build_tools=_build_advisor_tools, + required_keys=[], +) diff --git a/tools/meta/__init__.py b/tools/meta/__init__.py new file mode 100644 index 0000000..6326495 --- /dev/null +++ b/tools/meta/__init__.py @@ -0,0 +1,145 @@ +"""Credential meta-tools (T039-T041). + +Provides tools for users to inspect their service access and credentials. +""" + +from __future__ import annotations + +from typing import Any + +from tool_registry import ToolSet, openai_tools_to_copilot + +META_SYSTEM_PROMPT = """\ +You can help users check their service access and available integrations. \ +When a user asks about their accounts, services, or credentials, use the \ +list_my_services and get_my_credentials tools. \ +Never reveal passwords unless the system configuration explicitly allows it. +""" + +TOOLS = [ + { + "type": "function", + "function": { + "name": "list_my_services", + "description": ( + "List all services available to the user, showing which are active " + "(have credentials), which are available for setup, and their capabilities." 
def _handle_get_my_credentials(user: Any, service: str) -> str:
    """Return a markdown summary of *user*'s credential for *service*.

    Includes username, a display-friendly service URL, and expiry. The raw
    token is revealed only when ``settings.ALLOW_CREDENTIAL_REVEAL_IN_CHAT``
    is enabled; otherwise only a "stored securely" note is shown.
    """
    if user is None:
        return "Unable to determine your identity."
    if not service:
        return "Please specify which service you want credentials for."

    # Imported lazily to avoid import cycles at module load time.
    from config import settings
    from user_store import get_store

    store = get_store()
    cred = store.get_credential(user.id, service)
    if cred is None:
        return f"You don't have credentials for {service}. Say 'set up {service}' to get started."

    parts = [f"**Service**: {service}"]
    if cred.service_username:
        parts.append(f"**Username**: {cred.service_username}")

    # Service URL from settings — assumes a <SERVICE>_API_URL naming
    # convention on the settings object (TODO confirm for all services).
    url_attr = f"{service.upper()}_API_URL"
    url = getattr(settings, url_attr, "")
    if url:
        # Strip /api/v1 suffix for display (users want the web UI root,
        # not the API endpoint).
        display_url = url.rstrip("/")
        for suffix in ("/api/v1", "/api"):
            if display_url.endswith(suffix):
                display_url = display_url[: -len(suffix)]
                break
        parts.append(f"**URL**: {display_url}")

    if cred.expires_at:
        parts.append(f"**Expires**: {cred.expires_at}")

    if settings.ALLOW_CREDENTIAL_REVEAL_IN_CHAT:
        from user_store import decrypt

        # Decryption can fail (e.g. rotated vault key); degrade gracefully
        # rather than crashing the chat turn.
        try:
            token = decrypt(cred.encrypted_token)
            parts.append(f"**API Token**: `{token}`")
            parts.append("⚠️ Be careful — this token grants full access to your account.")
        except Exception:
            parts.append("**API Token**: (decryption failed)")
    else:
        parts.append("**Password/Token**: Stored securely. Access the service directly at the URL above.")

    return "\n".join(parts)
+""" + +from __future__ import annotations + +import logging +import pathlib +import subprocess +from typing import Any + +from config import settings +from tool_registry import ToolSet, openai_tools_to_copilot + +logger = logging.getLogger(__name__) + +# ── Project definitions ────────────────────────────────────────── + + +def _build_projects() -> dict[str, dict[str, Any]]: + """Build the project map from environment-configured directories.""" + site_dir = pathlib.Path(settings.SITE_DIR) if hasattr(settings, "SITE_DIR") else pathlib.Path("/site") + memoraiz_dir = pathlib.Path(settings.MEMORAIZ_DIR) if hasattr(settings, "MEMORAIZ_DIR") else pathlib.Path("/memoraiz") + + projects: dict[str, dict[str, Any]] = {} + if site_dir.exists(): + projects["betterlifesg"] = { + "dir": site_dir, + "label": "Better Life SG website", + "git_repo": site_dir.parent, + } + if memoraiz_dir.exists(): + projects["memoraiz"] = { + "dir": memoraiz_dir, + "label": "Memoraiz app (React frontend)", + "git_repo": memoraiz_dir.parent, + } + return projects + + +PROJECTS = _build_projects() +PROJECT_NAMES = list(PROJECTS.keys()) or ["betterlifesg", "memoraiz"] + + +# ── Path safety ────────────────────────────────────────────────── + + +def _resolve(base: pathlib.Path, path: str) -> pathlib.Path: + """Resolve a relative path inside a base dir, preventing path traversal.""" + resolved = (base / path).resolve() + if not str(resolved).startswith(str(base.resolve())): + raise ValueError(f"Path traversal blocked: {path}") + return resolved + + +# ── Git push ───────────────────────────────────────────────────── + + +def _git_push(project_key: str, changed_file: str) -> str: + """Commit and push changes in the project's git repo.""" + proj = PROJECTS[project_key] + repo = proj["git_repo"] + if not (repo / ".git").exists(): + return "(no git repo — skipped push)" + try: + subprocess.run(["git", "add", "-A"], cwd=repo, check=True, capture_output=True) + subprocess.run( + ["git", "commit", "-m", 
def handle_tool_call(name: str, args: dict) -> str:
    """Execute a site-editing tool call and return the result as a string.

    Dispatches on *name* (list_files / read_file / write_file) against the
    project directory selected by ``args["project"]``. write_file also
    triggers a git commit+push via _git_push. All outcomes — including
    errors — are returned as plain strings for the model to read.
    """
    project_key = args.get("project", "betterlifesg")
    if project_key not in PROJECTS:
        return f"Unknown project: {project_key}. Available: {', '.join(PROJECTS.keys())}"
    base = PROJECTS[project_key]["dir"]

    if name == "list_files":
        subdir = args.get("subdirectory", "")
        # _resolve guards against path traversal; empty subdir means root.
        target = _resolve(base, subdir) if subdir else base
        files = []
        for p in sorted(target.rglob("*")):
            # Skip VCS/build noise so listings stay readable.
            if p.is_file() and not any(
                part in (".git", "node_modules", "__pycache__") for part in p.parts
            ):
                files.append(str(p.relative_to(base)))
        # Cap output at 200 entries to keep the tool result bounded.
        return "\n".join(files[:200]) if files else "(no files found)"

    if name == "read_file":
        # NOTE(review): args["path"] raises KeyError if the model omits it;
        # presumably the caller surfaces that as a tool error — confirm.
        path = _resolve(base, args["path"])
        if not path.exists():
            return f"Error: {args['path']} does not exist."
        # Don't dump raw image bytes into the chat context.
        if path.suffix in (".png", ".jpg", ".jpeg", ".gif", ".webp", ".ico"):
            return f"[Binary image file: {args['path']}, {path.stat().st_size} bytes]"
        return path.read_text(encoding="utf-8")

    if name == "write_file":
        path = _resolve(base, args["path"])
        # Create intermediate directories so new files can be placed anywhere.
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(args["content"], encoding="utf-8")
        # Side effect: commit + push the whole repo after every write.
        push_result = _git_push(project_key, args["path"])
        return f"OK — wrote {len(args['content'])} chars to {args['path']}. {push_result}"

    return f"Unknown tool: {name}"
\ +Always write the COMPLETE file content — never partial. \ +Changes are committed and pushed to git automatically after writing.\ +""" + +# ── ToolSet registration ───────────────────────────────────────── + + +def _factory(context: dict) -> list: + """Build Copilot SDK tools from the OpenAI schemas.""" + return openai_tools_to_copilot(TOOLS, handler=handle_tool_call) + + +site_editing_toolset = ToolSet( + name="site_editing", + system_prompt=SYSTEM_PROMPT, + openai_schemas=TOOLS, + factory=_factory, + required_keys=[], +) diff --git a/user_store.py b/user_store.py new file mode 100644 index 0000000..7f3a4cb --- /dev/null +++ b/user_store.py @@ -0,0 +1,769 @@ +"""User identity store — SQLite-backed user management, credential vault, +conversation storage, and event tracking. + +Provides: +- Schema migrations applied idempotently at startup +- Fernet encryption for per-user API tokens at rest +- User resolution (provider + external_id → internal User) +- Credential CRUD with lazy decryption support +- Session / message / event persistence +- Owner bootstrap migration from env-var credentials +""" + +from __future__ import annotations + +import logging +import sqlite3 +import time +import uuid +from dataclasses import dataclass +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from cryptography.fernet import Fernet + +from config import settings + +logger = logging.getLogger(__name__) + +# ── Data classes ───────────────────────────────────────────────── + + +def _now_iso() -> str: + return datetime.now(timezone.utc).isoformat() + + +@dataclass +class User: + id: str + display_name: str + email: str | None + created_at: str + updated_at: str + is_owner: bool + onboarding_complete: bool + telegram_approved: bool | None = None # None = pending, True = approved, False = denied + is_new: bool = False # transient — not persisted + + +@dataclass +class ExternalIdentity: + id: int + user_id: str + provider: str + external_id: 
def encrypt(plaintext: str) -> str:
    """Encrypt *plaintext* with the vault key and return base64 TEXT."""
    cipher = _get_fernet()
    token_bytes = cipher.encrypt(plaintext.encode())
    return token_bytes.decode()
external_identities ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id TEXT NOT NULL REFERENCES users(id), + provider TEXT NOT NULL, + external_id TEXT NOT NULL, + metadata_json TEXT, + created_at TEXT NOT NULL, + UNIQUE(provider, external_id) + ); + CREATE INDEX IF NOT EXISTS idx_external_identities_lookup + ON external_identities(provider, external_id); + + CREATE TABLE IF NOT EXISTS service_credentials ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id TEXT NOT NULL REFERENCES users(id), + service TEXT NOT NULL, + encrypted_token TEXT NOT NULL, + service_user_id TEXT, + service_username TEXT, + created_at TEXT NOT NULL, + expires_at TEXT, + last_used_at TEXT, + UNIQUE(user_id, service) + ); + CREATE INDEX IF NOT EXISTS idx_service_credentials_user_id + ON service_credentials(user_id); + + CREATE TABLE IF NOT EXISTS provisioning_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id TEXT NOT NULL REFERENCES users(id), + service TEXT NOT NULL, + action TEXT NOT NULL, + detail_json TEXT, + created_at TEXT NOT NULL + ); + + CREATE TABLE IF NOT EXISTS sessions ( + id TEXT PRIMARY KEY, + user_id TEXT NOT NULL REFERENCES users(id), + surface TEXT NOT NULL, + topic_id TEXT, + title TEXT, + created_at TEXT NOT NULL, + last_active_at TEXT NOT NULL + ); + CREATE INDEX IF NOT EXISTS idx_sessions_user_id + ON sessions(user_id); + + CREATE TABLE IF NOT EXISTS messages ( + id TEXT PRIMARY KEY, + session_id TEXT NOT NULL REFERENCES sessions(id), + role TEXT NOT NULL, + content TEXT NOT NULL, + created_at TEXT NOT NULL + ); + CREATE INDEX IF NOT EXISTS idx_messages_session_id + ON messages(session_id); + CREATE INDEX IF NOT EXISTS idx_messages_created_at + ON messages(created_at); + + CREATE TABLE IF NOT EXISTS events ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id TEXT NOT NULL REFERENCES users(id), + source TEXT NOT NULL, + event_type TEXT NOT NULL, + summary TEXT NOT NULL, + detail_json TEXT, + created_at TEXT NOT NULL, + consumed_at TEXT + ); + CREATE INDEX IF NOT 
EXISTS idx_events_user_created + ON events(user_id, created_at); + """, + ), + ( + 2, + "add_telegram_approved", + """ + ALTER TABLE users ADD COLUMN telegram_approved INTEGER; + """, + ), +] + + +def _apply_migrations(conn: sqlite3.Connection) -> None: + """Ensure all migrations are applied idempotently.""" + conn.execute( + """CREATE TABLE IF NOT EXISTS schema_migrations ( + version INTEGER PRIMARY KEY, + name TEXT NOT NULL, + applied_at TEXT NOT NULL + )""" + ) + applied = {row[0] for row in conn.execute("SELECT version FROM schema_migrations")} + for version, name, ddl in _MIGRATIONS: + if version in applied: + continue + logger.info("Applying migration %d: %s", version, name) + conn.executescript(ddl) + conn.execute( + "INSERT INTO schema_migrations (version, name, applied_at) VALUES (?, ?, ?)", + (version, name, _now_iso()), + ) + conn.commit() + logger.info("Schema migrations up to date (latest: %d)", max(v for v, _, _ in _MIGRATIONS)) + + +# ── SQLite connection (T010) ───────────────────────────────────── + +_MAX_WRITE_RETRIES = 3 +_RETRY_BASE_DELAY = 0.05 # seconds + + +def _connect(db_path: str | Path) -> sqlite3.Connection: + """Open a SQLite connection with WAL mode and foreign keys enabled.""" + conn = sqlite3.connect(str(db_path), timeout=10) + conn.execute("PRAGMA journal_mode=WAL") + conn.execute("PRAGMA foreign_keys=ON") + conn.row_factory = sqlite3.Row + return conn + + +def _write_with_retry(conn: sqlite3.Connection, fn: Any) -> Any: + """Execute *fn(conn)* with exponential-backoff retry on SQLITE_BUSY.""" + for attempt in range(_MAX_WRITE_RETRIES): + try: + return fn(conn) + except sqlite3.OperationalError as exc: + if "database is locked" not in str(exc) or attempt == _MAX_WRITE_RETRIES - 1: + raise + delay = _RETRY_BASE_DELAY * (2**attempt) + logger.warning("SQLITE_BUSY — retrying in %.2fs (attempt %d)", delay, attempt + 1) + time.sleep(delay) + return None # unreachable + + +# ── UserStore ──────────────────────────────────────────────────── 
    def resolve_or_create_user(
        self,
        provider: str,
        external_id: str,
        display_name: str,
        email: str | None = None,
    ) -> User:
        """Resolve (provider, external_id) → User. Creates if not found.

        Looks up the external identity; on a hit, returns the linked User
        (with ``is_new=False``). On a miss, creates both a users row and an
        external_identities row in one transaction and returns the new User
        with the transient ``is_new=True`` flag set.

        Args:
            provider: Identity provider name (e.g. "telegram").
            external_id: The provider-scoped user identifier.
            display_name: Name to store if a new user is created.
            email: Optional email for a newly created user.
        """

        def _do(conn: sqlite3.Connection) -> User:
            row = conn.execute(
                """SELECT u.id, u.display_name, u.email, u.created_at, u.updated_at,
                          u.is_owner, u.onboarding_complete, u.telegram_approved
                   FROM external_identities ei
                   JOIN users u ON u.id = ei.user_id
                   WHERE ei.provider = ? AND ei.external_id = ?""",
                (provider, external_id),
            ).fetchone()
            if row:
                # Preserve tri-state approval: NULL means "pending", not False.
                _raw_approved = row["telegram_approved"]
                return User(
                    id=row["id"],
                    display_name=row["display_name"],
                    email=row["email"],
                    created_at=row["created_at"],
                    updated_at=row["updated_at"],
                    is_owner=bool(row["is_owner"]),
                    onboarding_complete=bool(row["onboarding_complete"]),
                    telegram_approved=None if _raw_approved is None else bool(_raw_approved),
                    is_new=False,
                )
            # Create new user — random UUID id, non-owner, onboarding pending.
            now = _now_iso()
            user_id = str(uuid.uuid4())
            conn.execute(
                """INSERT INTO users (id, display_name, email, created_at, updated_at,
                                      is_owner, onboarding_complete)
                   VALUES (?, ?, ?, ?, ?, 0, 0)""",
                (user_id, display_name, email, now, now),
            )
            conn.execute(
                """INSERT INTO external_identities (user_id, provider, external_id, created_at)
                   VALUES (?, ?, ?, ?)""",
                (user_id, provider, external_id, now),
            )
            conn.commit()
            return User(
                id=user_id,
                display_name=display_name,
                email=email,
                created_at=now,
                updated_at=now,
                is_owner=False,
                onboarding_complete=False,
                is_new=True,
            )

        return _write_with_retry(self._conn, _do)
WHERE id = ?", + (_now_iso(), user_id), + ) + conn.commit() + + _write_with_retry(self._conn, _do) + + def set_telegram_approval(self, user_id: str, approved: bool) -> None: + """Set telegram_approved to 1 (approved) or 0 (denied).""" + + def _do(conn: sqlite3.Connection) -> None: + conn.execute( + "UPDATE users SET telegram_approved = ?, updated_at = ? WHERE id = ?", + (int(approved), _now_iso(), user_id), + ) + conn.commit() + + _write_with_retry(self._conn, _do) + + # ── Credential CRUD (T014) ─────────────────────────────────── + + def get_credentials(self, user_id: str) -> dict[str, ServiceCredential]: + """Return all credentials for a user keyed by service name.""" + rows = self._conn.execute( + """SELECT id, user_id, service, encrypted_token, service_user_id, + service_username, created_at, expires_at, last_used_at + FROM service_credentials WHERE user_id = ?""", + (user_id,), + ).fetchall() + return { + row["service"]: ServiceCredential( + id=row["id"], + user_id=row["user_id"], + service=row["service"], + encrypted_token=row["encrypted_token"], + service_user_id=row["service_user_id"], + service_username=row["service_username"], + created_at=row["created_at"], + expires_at=row["expires_at"], + last_used_at=row["last_used_at"], + ) + for row in rows + } + + def get_credential(self, user_id: str, service: str) -> ServiceCredential | None: + row = self._conn.execute( + """SELECT id, user_id, service, encrypted_token, service_user_id, + service_username, created_at, expires_at, last_used_at + FROM service_credentials WHERE user_id = ? 
    def store_credential(
        self,
        user_id: str,
        service: str,
        token: str,
        service_user_id: str | None = None,
        service_username: str | None = None,
        expires_at: str | None = None,
    ) -> None:
        """Store (or replace) a credential. Encrypts the token before writing.

        Upsert semantics via ON CONFLICT(user_id, service): on replace, the
        token/identity/expiry columns are overwritten, the original
        ``created_at`` is preserved (not in the UPDATE set), and
        ``last_used_at`` is reset to NULL since the new token is unused.
        """
        # Encrypt outside the retry closure — it does not touch the DB.
        enc_token = encrypt(token)
        now = _now_iso()

        def _do(conn: sqlite3.Connection) -> None:
            conn.execute(
                """INSERT INTO service_credentials
                       (user_id, service, encrypted_token, service_user_id,
                        service_username, created_at, expires_at)
                   VALUES (?, ?, ?, ?, ?, ?, ?)
                   ON CONFLICT(user_id, service) DO UPDATE SET
                       encrypted_token = excluded.encrypted_token,
                       service_user_id = excluded.service_user_id,
                       service_username = excluded.service_username,
                       expires_at = excluded.expires_at,
                       last_used_at = NULL""",
                (user_id, service, enc_token, service_user_id, service_username, now, expires_at),
            )
            conn.commit()

        _write_with_retry(self._conn, _do)
AND service = ?", + (_now_iso(), user_id, service), + ) + conn.commit() + + _write_with_retry(self._conn, _do) + + # ── Provisioning log ───────────────────────────────────────── + + def log_provisioning( + self, + user_id: str, + service: str, + action: str, + detail_json: str | None = None, + ) -> None: + def _do(conn: sqlite3.Connection) -> None: + conn.execute( + """INSERT INTO provisioning_log (user_id, service, action, detail_json, created_at) + VALUES (?, ?, ?, ?, ?)""", + (user_id, service, action, detail_json, _now_iso()), + ) + conn.commit() + + _write_with_retry(self._conn, _do) + + # ── Session + message CRUD (T023) ──────────────────────────── + + def get_or_create_session( + self, + user_id: str, + surface: str, + topic_id: str | None = None, + ) -> Session: + """Resolve or create a session for the given user/surface/topic.""" + + def _do(conn: sqlite3.Connection) -> Session: + if topic_id is not None: + row = conn.execute( + """SELECT id, user_id, surface, topic_id, title, created_at, last_active_at + FROM sessions WHERE user_id = ? AND surface = ? AND topic_id = ?""", + (user_id, surface, topic_id), + ).fetchone() + else: + row = conn.execute( + """SELECT id, user_id, surface, topic_id, title, created_at, last_active_at + FROM sessions WHERE user_id = ? AND surface = ? AND topic_id IS NULL""", + (user_id, surface), + ).fetchone() + if row: + now = _now_iso() + conn.execute( + "UPDATE sessions SET last_active_at = ? 
WHERE id = ?", + (now, row["id"]), + ) + conn.commit() + return Session( + id=row["id"], + user_id=row["user_id"], + surface=row["surface"], + topic_id=row["topic_id"], + title=row["title"], + created_at=row["created_at"], + last_active_at=now, + ) + now = _now_iso() + session_id = str(uuid.uuid4()) + conn.execute( + """INSERT INTO sessions (id, user_id, surface, topic_id, title, created_at, last_active_at) + VALUES (?, ?, ?, ?, NULL, ?, ?)""", + (session_id, user_id, surface, topic_id, now, now), + ) + conn.commit() + return Session( + id=session_id, + user_id=user_id, + surface=surface, + topic_id=topic_id, + title=None, + created_at=now, + last_active_at=now, + ) + + return _write_with_retry(self._conn, _do) + + def log_message(self, session_id: str, role: str, content: str) -> None: + def _do(conn: sqlite3.Connection) -> None: + conn.execute( + """INSERT INTO messages (id, session_id, role, content, created_at) + VALUES (?, ?, ?, ?, ?)""", + (str(uuid.uuid4()), session_id, role, content, _now_iso()), + ) + conn.commit() + + _write_with_retry(self._conn, _do) + + def get_session_messages(self, session_id: str) -> list[Message]: + rows = self._conn.execute( + """SELECT id, session_id, role, content, created_at + FROM messages WHERE session_id = ? 
ORDER BY created_at""", + (session_id,), + ).fetchall() + return [ + Message( + id=row["id"], + session_id=row["session_id"], + role=row["role"], + content=row["content"], + created_at=row["created_at"], + ) + for row in rows + ] + + # ── Event CRUD (T025) ──────────────────────────────────────── + + def store_event( + self, + user_id: str, + source: str, + event_type: str, + summary: str, + detail_json: str | None = None, + ) -> None: + def _do(conn: sqlite3.Connection) -> None: + conn.execute( + """INSERT INTO events (user_id, source, event_type, summary, detail_json, created_at) + VALUES (?, ?, ?, ?, ?, ?)""", + (user_id, source, event_type, summary, detail_json, _now_iso()), + ) + conn.commit() + + _write_with_retry(self._conn, _do) + + def get_recent_events(self, user_id: str, window_hours: int = 24) -> list[Event]: + cutoff = datetime.now(timezone.utc).isoformat() + # Use simple string comparison — ISO8601 sorts lexicographically + rows = self._conn.execute( + """SELECT id, user_id, source, event_type, summary, detail_json, created_at, consumed_at + FROM events + WHERE user_id = ? AND created_at >= datetime(?, '-' || ? || ' hours') + ORDER BY created_at DESC""", + (user_id, cutoff, window_hours), + ).fetchall() + return [ + Event( + id=row["id"], + user_id=row["user_id"], + source=row["source"], + event_type=row["event_type"], + summary=row["summary"], + detail_json=row["detail_json"], + created_at=row["created_at"], + consumed_at=row["consumed_at"], + ) + for row in rows + ] + + def mark_events_consumed(self, event_ids: list[int]) -> None: + if not event_ids: + return + + def _do(conn: sqlite3.Connection) -> None: + placeholders = ",".join("?" for _ in event_ids) + conn.execute( + f"UPDATE events SET consumed_at = ? 
WHERE id IN ({placeholders})", # noqa: S608 + [_now_iso(), *event_ids], + ) + conn.commit() + + _write_with_retry(self._conn, _do) + + # ── Owner bootstrap (T016) ─────────────────────────────────── + + def bootstrap_owner(self) -> User: + """Idempotent: ensure the owner user exists, migrate env-var credentials.""" + + def _do(conn: sqlite3.Connection) -> User: + row = conn.execute( + """SELECT id, display_name, email, created_at, updated_at, + is_owner, onboarding_complete + FROM users WHERE is_owner = 1""", + ).fetchone() + if row: + owner = User( + id=row["id"], + display_name=row["display_name"], + email=row["email"], + created_at=row["created_at"], + updated_at=row["updated_at"], + is_owner=True, + onboarding_complete=True, + ) + # Still migrate any new env-var credentials that weren't there before + self._migrate_env_credentials(owner.id) + return owner + + now = _now_iso() + owner_id = str(uuid.uuid4()) + conn.execute( + """INSERT INTO users (id, display_name, email, created_at, updated_at, + is_owner, onboarding_complete) + VALUES (?, ?, NULL, ?, ?, 1, 1)""", + (owner_id, "Owner", now, now), + ) + # Create "web" external identity for owner + conn.execute( + """INSERT INTO external_identities (user_id, provider, external_id, created_at) + VALUES (?, 'web', 'owner', ?)""", + (owner_id, now), + ) + conn.commit() + logger.info("Created owner user %s", owner_id) + + self._migrate_env_credentials(owner_id) + + return User( + id=owner_id, + display_name="Owner", + email=None, + created_at=now, + updated_at=now, + is_owner=True, + onboarding_complete=True, + is_new=True, + ) + + return _write_with_retry(self._conn, _do) + + def _migrate_env_credentials(self, owner_id: str) -> None: + """Migrate static env-var API keys into the owner's credential vault.""" + migrations = [ + ("vikunja", settings.VIKUNJA_API_KEY, settings.VIKUNJA_API_URL), + ("karakeep", settings.KARAKEEP_API_KEY, settings.KARAKEEP_API_URL), + ] + for service, api_key, _url in migrations: + if not 
api_key: + continue + existing = self.get_credential(owner_id, service) + if existing: + continue + self.store_credential(owner_id, service, api_key) + self.log_provisioning(owner_id, service, "env_migrated", '{"source": "env_var"}') + logger.info("Migrated %s env-var credential for owner %s", service, owner_id) + + +# ── Module-level singleton ─────────────────────────────────────── + +_store: UserStore | None = None + + +def get_store() -> UserStore: + """Return the module-level UserStore singleton, creating it if needed.""" + global _store # noqa: PLW0603 + if _store is None: + _store = UserStore() + return _store + + +def init_store(db_path: str | Path | None = None) -> UserStore: + """Initialise the UserStore singleton explicitly (e.g. in FastAPI lifespan).""" + global _store # noqa: PLW0603 + _store = UserStore(db_path) + return _store diff --git a/ux.py b/ux.py new file mode 100644 index 0000000..594323e --- /dev/null +++ b/ux.py @@ -0,0 +1,616 @@ +"""User-facing messaging helpers for Telegram and web chat surfaces.""" + +from __future__ import annotations + +import html +import json +import re +from typing import Any, Literal, cast + +from copilot.generated.session_events import Data, SessionEvent, SessionEventType, ToolRequest + +Surface = Literal["telegram", "web"] + + +def extract_final_text(events: list[SessionEvent]) -> str: + """Walk collected events and return the final assistant message text.""" + # Prefer the last ASSISTANT_MESSAGE event + for event in reversed(events): + if event.type == SessionEventType.ASSISTANT_MESSAGE and event.data and event.data.content: + return event.data.content.strip() + + # Fallback: concatenate deltas + parts: list[str] = [] + for event in events: + if event.type == SessionEventType.ASSISTANT_MESSAGE_DELTA and event.data and event.data.delta_content: + parts.append(event.data.delta_content) + text: str = "".join(parts).strip() + return text + + +def working_message(*, surface: Surface) -> str: + if surface == 
"telegram": + return "Thinking ..." + return "Working on it" + + +def busy_message(*, surface: Surface) -> str: + if surface == "telegram": + return "Still working on the previous message in this chat. Wait for that reply, or send /new to reset." + return "Still working on the previous message. Wait for that reply before sending another one." + + +def format_session_error(*, surface: Surface, error: Exception | str | None = None) -> str: + parts: list[str] = ["Run failed with an internal exception."] + detail: str = _format_error_detail(error) + if detail: + parts.append(f"Exception: {detail}") + if _looks_image_unsupported(detail): + parts.append( + "This model does not support image inputs. " + "Switch to a vision model (e.g. gpt-4o, claude-sonnet, gemini-2.5-pro) or resend without the image." + ) + elif _looks_rate_limited(detail): + parts.append( + "The provider is rate-limiting requests (HTTP 429). The SDK already retried several times before giving up." + ) + elif _looks_retryable(detail): + parts.append("All automatic retries were exhausted.") + parts.append(_retry_guidance(surface)) + return "\n\n".join(parts) + + +def extract_intent_from_tool(event: SessionEvent) -> str | None: + """If the event is a report_intent tool call, return the intent text.""" + if event.type != SessionEventType.TOOL_EXECUTION_START: + return None + tool_name: str = _event_tool_name(event) + if tool_name != "report_intent": + return None + args = event.data and event.data.arguments + if not args: + return None + if isinstance(args, str): + try: + args = json.loads(args) + except Exception: + return None + if isinstance(args, dict): + args_dict = cast(dict[str, Any], args) + intent = args_dict.get("intent", "") + if isinstance(intent, str) and intent.strip(): + return intent.strip() + return None + + +def extract_tool_intent_summary(event: SessionEvent) -> str | None: + """Extract intent_summary from tool_requests on any event. 
+ + The Copilot SDK can attach ``tool_requests`` to events (e.g. before tool + execution starts). Each tool request may carry an ``intent_summary`` + describing *why* the agent wants to call that tool. + """ + data: Data | None = getattr(event, "data", None) + if data is None: + return None + tool_requests: list[ToolRequest] = getattr(data, "tool_requests") or [] + if not tool_requests: + return None + try: + summary: str = "\n".join( + intent_summary + for request in tool_requests + if (intent_summary := getattr(request, "intent_summary", "").strip()) + ) + return summary or None + except (IndexError, TypeError, KeyError): + return None + return None + + +def stream_status_updates(event: Any, *, include_reasoning_status: bool = True) -> list[str]: + """Return ordered, deduplicated user-facing status updates for a Copilot SDK event.""" + event_type: SessionEventType = event.type + data: Data | None = getattr(event, "data", None) + updates: list[str] = [] + seen: set[str] = set() + noise_texts: set[str] = {"tool done", "Thinking"} + ugly_texts: dict[str, str] = { + "Running view": "Viewing file(s)", + } + + def add(text: Any, *, prefix: str | None = None, limit: int = 220) -> None: + if text in (None, ""): + return + elif isinstance(text, str) and text.strip().lower() in noise_texts: + return + elif isinstance(text, str): + text = ugly_texts.get(text.strip(), text) + + normalized: str = _normalize_status_text(text, prefix=prefix, limit=limit) + if not normalized: + return + dedupe_key = normalized.casefold() + if dedupe_key in seen: + return + seen.add(dedupe_key) + updates.append(normalized) + + add(getattr(data, "progress_message", None), limit=240) + add(extract_tool_intent_summary(event), limit=240) + + if event_type == SessionEventType.TOOL_EXECUTION_START: + tool_name: str = _event_tool_name(event) + intent: str | None = extract_intent_from_tool(event) + if tool_name == "report_intent": + pass + elif not intent and not tool_name: + pass + else: + kwargs: dict 
= {"text": intent, "prefix": tool_name} if intent else {"text": f"Running {tool_name}"} + add(limit=160, **kwargs) + + if event_type == SessionEventType.TOOL_EXECUTION_COMPLETE: + tool_name: str = _event_tool_name(event) + if tool_name != "report_intent": + add(f"{tool_name} done", limit=160) + + if event_type == SessionEventType.SUBAGENT_SELECTED: + add(f"Routed to {_event_agent_name(event)}", limit=180) + + if event_type == SessionEventType.SUBAGENT_STARTED: + add(f"{_event_agent_name(event)} working", limit=180) + + if event_type == SessionEventType.SESSION_COMPACTION_START: + add("Compacting context", limit=120) + + if event_type == SessionEventType.SESSION_COMPACTION_COMPLETE: + add("Context compacted", limit=120) + + # if event_type == SessionEventType.ASSISTANT_TURN_START: + # add("Thinking", limit=80) + + if event_type == SessionEventType.ASSISTANT_INTENT: + add(getattr(data, "intent", None), limit=240) + + if include_reasoning_status and event_type in { + SessionEventType.ASSISTANT_REASONING, + SessionEventType.ASSISTANT_REASONING_DELTA, + }: + reasoning = (data and data.reasoning_text) or "" + if reasoning.strip(): + first_line = reasoning.strip().splitlines()[0].strip() + if first_line.lower().startswith(("intent:", "intent ")): + add(first_line, limit=240) + + add(getattr(data, "message", None), limit=240) + add(getattr(data, "title", None), prefix="Title", limit=200) + add(getattr(data, "summary", None), prefix="Summary", limit=240) + add(getattr(data, "summary_content", None), prefix="Context summary", limit=240) + add(getattr(data, "warning_type", None), prefix="Warning type", limit=160) + + for warning in _iter_status_values(getattr(data, "warnings", None)): + add(warning, prefix="Warning", limit=240) + + add(getattr(data, "error_reason", None), prefix="Error", limit=240) + + for error in _iter_status_values(getattr(data, "errors", None)): + add(error, prefix="Error", limit=240) + + add(getattr(data, "reason", None), prefix="Stop reason", limit=200) 
+ add(_format_server_status(getattr(data, "status", None)), prefix="Server", limit=160) + add(getattr(data, "phase", None), prefix="Phase", limit=120) + add(getattr(data, "mcp_tool_name", None), prefix="MCP tool", limit=180) + add(_format_code_changes_status(getattr(data, "code_changes", None)), limit=200) + # add(_format_cache_status(data), limit=180) + + # The SDK's `duration` is only a subtotal for the current API round-trip. + # Total turn runtime is tracked by the caller and surfaced as a live + # elapsed clock while the overall request is still running. + + total_premium_requests = getattr(data, "total_premium_requests", None) + if total_premium_requests not in (None, ""): + add(f"Premium requests: {_format_metric_number(total_premium_requests)}", limit=140) + + add(getattr(data, "branch", None), prefix="Branch", limit=160) + add(getattr(data, "cwd", None), prefix="CWD", limit=220) + add(getattr(data, "git_root", None), prefix="Git root", limit=220) + + head_commit = getattr(data, "head_commit", None) + if head_commit: + add(f"Head: {str(head_commit).strip()[:12]}", limit=80) + + if getattr(data, "reasoning_opaque", None): + add("Encrypted reasoning attached", limit=120) + + if model := getattr(data, "model", None): + add(model, prefix="\n🤖", limit=160) + return updates + + +def stream_status_text(event: Any) -> str: + """Return a single concatenated status string for compatibility call sites.""" + return "\n".join(stream_status_updates(event)) + + +def stream_trace_event(event: Any) -> dict[str, str] | None: + """Extract structured trace entries for tool activity and subagent routing.""" + event_type = event.type + + if event_type == SessionEventType.TOOL_EXECUTION_START: + tool_name = _event_tool_name(event) + if tool_name == "report_intent": + return None # suppress report_intent from trace + tool_call_id = (event.data and event.data.tool_call_id) or "" + detail = _stringify_trace_detail(event.data and event.data.arguments) + return { + "kind": "trace", + 
"category": "tool_call", + "key": f"tool:{tool_call_id or tool_name}", + "tool_name": tool_name, + "title": f"Tool call: {tool_name}", + "summary": f"Called {tool_name}", + "text": f"Called {tool_name}", + "detail": detail or "No arguments exposed.", + } + + if event_type == SessionEventType.TOOL_EXECUTION_COMPLETE: + tool_name = _event_tool_name(event) + if tool_name == "report_intent": + return None # suppress report_intent from trace + tool_call_id = (event.data and event.data.tool_call_id) or "" + output_detail = _stringify_trace_detail(event.data and event.data.output) + return { + "kind": "trace", + "category": "tool_call", + "key": f"tool:{tool_call_id or tool_name}", + "tool_name": tool_name, + "title": f"Tool call: {tool_name}", + "summary": f"{tool_name} done", + "text": f"{tool_name} done", + "output_detail": output_detail or "Tool finished with no readable output.", + } + + if event_type == SessionEventType.SUBAGENT_SELECTED: + agent_name = _event_agent_name(event) + return { + "kind": "trace", + "category": "subagent", + "key": f"agent:{agent_name}", + "title": f"Subagent: {agent_name}", + "summary": f"Routed to {agent_name}", + "text": f"Routed to {agent_name}", + "detail": f"The run is now executing inside the {agent_name} subagent.", + } + + return None + + +def stream_reasoning_event(event: Any) -> tuple[str, str, bool] | None: + """Extract reasoning text from a Copilot SDK event when available.""" + if event.type == SessionEventType.ASSISTANT_REASONING_DELTA: + reasoning_id = (event.data and event.data.reasoning_id) or "reasoning" + text = (event.data and event.data.reasoning_text) or "" + if text.strip(): + return reasoning_id, text, False + return None + + if event.type == SessionEventType.ASSISTANT_REASONING: + reasoning_id = (event.data and event.data.reasoning_id) or "reasoning" + text = (event.data and event.data.reasoning_text) or "" + if text.strip(): + return reasoning_id, text, True + return None + + return None + + +def 
format_tool_counts(tool_counts: dict[str, int], *, current_status: str = "") -> str: + """Build a compact one-line summary of tool call counts.""" + if not tool_counts: + return current_status or "" + + parts: list[str] = [] + for name, count in sorted(tool_counts.items(), key=lambda kv: -kv[1]): + if count <= 0: + continue + parts.append(f"{count} {name}") + + lines: list[str] = [] + if current_status: + lines.append(current_status) + if parts: + lines.append(f"🔧 {' · '.join(parts)}") + return "\n".join(lines) + + +def format_elapsed_status(elapsed_seconds: float) -> str: + """Render a human-friendly turn runtime for live status displays.""" + total_seconds = max(0, int(elapsed_seconds)) + hours, remainder = divmod(total_seconds, 3600) + minutes, seconds = divmod(remainder, 60) + + if hours: + return f"Elapsed: {hours}h {minutes:02d}m {seconds:02d}s" + if minutes: + return f"Elapsed: {minutes}m {seconds:02d}s" + return f"Elapsed: {seconds}s" + + +def append_elapsed_status(text: Any, *, elapsed_seconds: float) -> str: + """Append the current turn runtime to a status line without duplicating it.""" + lines = [line for line in str(text or "").splitlines() if not line.strip().lower().startswith("elapsed:")] + base = "\n".join(lines).strip() + elapsed = format_elapsed_status(elapsed_seconds) + if not base: + return elapsed + return f"{base}\n{elapsed}" + + +async def safe_delete_message(message: Any) -> None: + if message is None: + return + try: + await message.delete() + except Exception: + return + + +def markdown_to_telegram_html(text: str) -> str: + """Convert common Markdown to Telegram-compatible HTML. + + Handles fenced code blocks, inline code, bold, italic, strikethrough, + and links. Falls back gracefully — anything it can't convert is + HTML-escaped and sent as plain text. 
+ """ + # Split into fenced code blocks vs everything else + parts: list[str] = [] + # Match ```lang\n...\n``` (with optional language tag) + code_block_re = re.compile(r"```(\w*)\n(.*?)```", re.DOTALL) + last = 0 + for m in code_block_re.finditer(text): + # Process non-code text before this block + if m.start() > last: + parts.append(_md_inline_to_html(text[last : m.start()])) + lang = m.group(1) + code = html.escape(m.group(2).rstrip("\n")) + if lang: + parts.append(f'
{code}
') + else: + parts.append(f"
{code}
") + last = m.end() + # Remaining text after last code block + if last < len(text): + parts.append(_md_inline_to_html(text[last:])) + return "".join(parts) + + +def _md_inline_to_html(text: str) -> str: + """Convert inline Markdown (outside code blocks) to Telegram HTML.""" + # First, protect inline code spans so their contents aren't modified + inline_code_re = re.compile(r"`([^`]+)`") + placeholder = "\x00CODE\x00" + codes: list[str] = [] + + def _save_code(m: re.Match) -> str: + codes.append(html.escape(m.group(1))) + return f"{placeholder}{len(codes) - 1}{placeholder}" + + text = inline_code_re.sub(_save_code, text) + + # Escape HTML entities in the remaining text + text = html.escape(text) + + # Bold: **text** or __text__ + text = re.sub(r"\*\*(.+?)\*\*", r"\1", text) + text = re.sub(r"__(.+?)__", r"\1", text) + + # Italic: *text* or _text_ (but not inside words like foo_bar) + text = re.sub(r"(?\1", text) + text = re.sub(r"(?\1", text) + + # Strikethrough: ~~text~~ + text = re.sub(r"~~(.+?)~~", r"\1", text) + + # Links: [text](url) + text = re.sub(r"\[([^\]]+)\]\(([^)]+)\)", r'\1', text) + + # Restore inline code spans + for i, code_html in enumerate(codes): + text = text.replace(f"{placeholder}{i}{placeholder}", f"{code_html}") + + return text + + +# ── Private helpers ────────────────────────────────────────────────── + + +def _event_tool_name(event: Any) -> str: + if event.data: + return event.data.tool_name or event.data.name or "tool" + return "tool" + + +def _event_agent_name(event: Any) -> str: + if event.data: + return event.data.agent_name or event.data.agent_display_name or "specialist" + return "specialist" + + +def _retry_guidance(surface: Surface) -> str: + if surface == "telegram": + return "Reply with a narrower follow-up, switch model/provider, or send /new to reset the session." + return "Retry with a narrower follow-up, switch model/provider, or start a fresh chat." 
+ + +def _looks_retryable(detail: str) -> bool: + """Check if the error detail matches a transient failure pattern.""" + if not detail: + return False + if _looks_rate_limited(detail): + return False + lower = detail.lower() + return any( + p in lower + for p in ( + "failed to get response", + "operation was aborted", + "timed out", + "timeout", + "502", + "503", + "504", + "service unavailable", + "overloaded", + ) + ) + + +def _looks_rate_limited(detail: str) -> bool: + """Check if the error is specifically a 429 / rate-limit.""" + if not detail: + return False + lower = detail.lower() + return any(p in lower for p in ("429", "rate limit", "rate_limit", "too many requests")) + + +def _looks_image_unsupported(detail: str) -> bool: + """Check if the error indicates the model does not accept image inputs.""" + if not detail: + return False + lower = detail.lower() + return any( + p in lower + for p in ("0 image(s) may be provided", "does not support image", "image input is not supported", "images are not supported") + ) + + +def _format_error_detail(error: Exception | str | None) -> str: + if error is None: + return "" + if isinstance(error, str): + return error.strip() + name = type(error).__name__ + message = str(error).strip() + if not message or message == name: + return name + return f"{name}: {message}" + + +def _normalize_status_text(text: Any, *, prefix: str | None = None, limit: int = 220) -> str: + if text in (None, ""): + return "" + + rendered = " ".join(str(text).split()) + if not rendered: + return "" + if prefix: + rendered = f"{prefix}: {rendered}" + if limit > 0 and len(rendered) > limit: + return f"{rendered[: limit - 3].rstrip()}..." 
+ return rendered + + +def _format_metric_number(value: Any) -> str: + try: + number = float(value) + except (TypeError, ValueError): + return str(value).strip() + + if number.is_integer(): + return f"{int(number):,}" + return f"{number:,.2f}".rstrip("0").rstrip(".") + + +def _format_code_changes_status(code_changes: Any) -> str: + if not code_changes: + return "" + + files_modified = getattr(code_changes, "files_modified", None) + if files_modified is None and isinstance(code_changes, dict): + files_modified = code_changes.get("files_modified") + + lines_added = getattr(code_changes, "lines_added", None) + if lines_added is None and isinstance(code_changes, dict): + lines_added = code_changes.get("lines_added") + + lines_removed = getattr(code_changes, "lines_removed", None) + if lines_removed is None and isinstance(code_changes, dict): + lines_removed = code_changes.get("lines_removed") + + parts: list[str] = [] + if isinstance(files_modified, (list, tuple, set)): + parts.append(f"{len(files_modified)} files") + elif files_modified: + parts.append("1 file") + + if lines_added not in (None, "") or lines_removed not in (None, ""): + parts.append(f"+{_format_metric_number(lines_added or 0)}/-{_format_metric_number(lines_removed or 0)} lines") + + if not parts: + return "Code changes recorded" + return f"Code changes: {', '.join(parts)}" + + +def _format_cache_status(data: Data | None) -> str: + if data is None: + return "" + + cache_read_tokens = getattr(data, "cache_read_tokens", None) + cache_write_tokens = getattr(data, "cache_write_tokens", None) + if cache_read_tokens in (None, "") and cache_write_tokens in (None, ""): + return "" + + parts: list[str] = [] + if cache_read_tokens not in (None, ""): + parts.append(f"read {_format_metric_number(cache_read_tokens)}") + if cache_write_tokens not in (None, ""): + parts.append(f"wrote {_format_metric_number(cache_write_tokens)}") + return f"Prompt cache: {', '.join(parts)}" + + +def _format_server_status(status: Any) 
-> str: + if status in (None, ""): + return "" + if isinstance(status, str): + return status.strip() + + for attr in ("value", "status", "name"): + value = getattr(status, attr, None) + if isinstance(value, str) and value.strip(): + return value.strip() + + return str(status).strip() + + +def _iter_status_values(value: Any) -> list[Any]: + if value in (None, ""): + return [] + if isinstance(value, (list, tuple, set)): + return list(value) + return [value] + + +def _stringify_trace_detail(value: Any, *, limit: int = 1800) -> str: + if value in (None, ""): + return "" + + rendered = "" + if isinstance(value, str): + candidate = value.strip() + if candidate: + if candidate[:1] in {"{", "["}: + try: + rendered = json.dumps(json.loads(candidate), indent=2, ensure_ascii=False) + except Exception: + rendered = candidate + else: + rendered = candidate + elif isinstance(value, (dict, list, tuple)): + rendered = json.dumps(value, indent=2, ensure_ascii=False) + else: + rendered = str(value).strip() + + if len(rendered) <= limit: + return rendered + return f"{rendered[: limit - 3].rstrip()}..." 
diff --git a/web_fallback_store.py b/web_fallback_store.py new file mode 100644 index 0000000..74d2984 --- /dev/null +++ b/web_fallback_store.py @@ -0,0 +1,767 @@ +"""Persistent thread store shared by the web UI and Telegram bot sessions.""" + +from __future__ import annotations + +import asyncio +import json +import uuid +from collections import defaultdict +from decimal import Decimal +from pathlib import Path +from threading import RLock +from typing import Any + +from local_media_store import LocalMediaStore + +DEFAULT_TITLE = "New chat" +DEFAULT_PROVIDER = "openai" +DEFAULT_MODEL = "gpt-5.4" +DEFAULT_SOURCE = "web" +TITLE_SOURCE_DEFAULT = "default" +TITLE_SOURCE_AUTO = "auto" +TITLE_SOURCE_MANUAL = "manual" +TITLE_SOURCE_MAGIC = "magic" +KNOWN_TITLE_SOURCES = { + TITLE_SOURCE_DEFAULT, + TITLE_SOURCE_AUTO, + TITLE_SOURCE_MANUAL, + TITLE_SOURCE_MAGIC, +} + + +class WebFallbackStore: + def __init__(self, data_dir: str = "/data", media_store: LocalMediaStore | None = None): + self._threads: dict[str, dict[str, Any]] = {} + self._session_index: dict[str, str] = {} + self._locks: dict[str, asyncio.Lock] = defaultdict(asyncio.Lock) + self._state_lock = RLock() + self._file_path = Path(data_dir) / "web_threads.json" + self._media_store = media_store or LocalMediaStore(data_dir) + self._last_tg_update_id: int = 0 + self._topic_system_prompts: dict[str, dict[str, Any]] = {} + self._global_learnings: list[dict[str, Any]] = [] + self._load() + self._normalize_threads() + + def list_threads(self) -> list[dict[str, Any]]: + threads = sorted(self._threads.values(), key=lambda thread: thread["updated_at"], reverse=True) + return [self._serialize_thread(thread, include_messages=False) for thread in threads] + + def search_threads(self, query: str) -> list[dict[str, Any]]: + """Search threads by keyword across title and message content.""" + needle = query.strip().lower() + if not needle: + return self.list_threads() + matches: list[dict[str, Any]] = [] + for thread in 
        # ── tail of search_threads ────────────────────────────────────────
        # NOTE(review): the opening of this method (its signature, the
        # `needle`/`matches` setup, and the `for thread in` loop header) is
        # above this chunk and not visible here — the dangling expression
        # below is the continuation of that loop header.
        self._threads.values():
            # Title match wins; otherwise scan message contents.
            if needle in thread.get("title", "").lower():
                matches.append(thread)
                continue
            for msg in thread.get("messages", []):
                if needle in str(msg.get("content", "")).lower():
                    matches.append(thread)
                    break
        # Most recently updated threads first; messages omitted from results.
        matches.sort(key=lambda t: t["updated_at"], reverse=True)
        return [self._serialize_thread(t, include_messages=False) for t in matches]

    def create_thread(
        self,
        title: str,
        model: str,
        provider: str = DEFAULT_PROVIDER,
        *,
        source: str = DEFAULT_SOURCE,
        session_label: str | None = None,
    ) -> dict[str, Any]:
        """Create, register, and persist a new thread; return its serialized form."""
        with self._state_lock:
            thread = self._build_thread(
                title=title, model=model, provider=provider, source=source, session_label=session_label
            )
            self._threads[thread["id"]] = thread
            self._persist_locked()
            return self._serialize_thread(thread)

    def get_thread(self, thread_id: str, *, include_messages: bool = True) -> dict[str, Any]:
        """Return one thread by id; raises KeyError if unknown.

        NOTE(review): reads ``self._threads`` without taking ``_state_lock``,
        unlike most other accessors — confirm this is intentional.
        """
        thread = self._require_thread(thread_id)
        return self._serialize_thread(thread, include_messages=include_messages)

    def get_session_thread(self, session_key: str, *, include_messages: bool = True) -> dict[str, Any] | None:
        """Return the thread currently mapped to *session_key*, or None."""
        with self._state_lock:
            thread = self._get_session_thread_record(session_key)
            if thread is None:
                return None
            return self._serialize_thread(thread, include_messages=include_messages)

    def get_or_create_session_thread(
        self,
        *,
        session_key: str,
        title: str,
        model: str,
        provider: str = DEFAULT_PROVIDER,
        source: str = "telegram",
        session_label: str | None = None,
    ) -> dict[str, Any]:
        """Return the session's thread, creating it (or refreshing its label) if needed.

        Persists only when something actually changed (new thread or new label).
        """
        with self._state_lock:
            thread = self._get_session_thread_record(session_key)
            changed = False
            if thread is None:
                thread = self._build_thread(
                    title=title,
                    model=model,
                    provider=provider,
                    source=source,
                    session_label=session_label,
                )
                self._threads[thread["id"]] = thread
                self._session_index[session_key] = thread["id"]
                changed = True
            elif session_label and thread.get("session_label") != session_label:
                thread["session_label"] = session_label
                changed = True

            if changed:
                self._persist_locked()
            return self._serialize_thread(thread)

    def start_new_session_thread(
        self,
        *,
        session_key: str,
        title: str,
        model: str,
        provider: str = DEFAULT_PROVIDER,
        source: str = "telegram",
        session_label: str | None = None,
    ) -> dict[str, Any]:
        """Create a fresh thread and repoint *session_key* at it (old thread is kept)."""
        with self._state_lock:
            thread = self._build_thread(
                title=title, model=model, provider=provider, source=source, session_label=session_label
            )
            self._threads[thread["id"]] = thread
            self._session_index[session_key] = thread["id"]
            self._persist_locked()
            return self._serialize_thread(thread)

    def update_thread(
        self,
        thread_id: str,
        *,
        title: str | None = None,
        title_source: str | None = None,
        model: str | None = None,
        provider: str | None = None,
    ) -> dict[str, Any]:
        """Patch title/provider/model on a thread; raises KeyError/ValueError.

        A title set here defaults its source to ``manual`` unless a valid
        *title_source* is supplied.
        """
        with self._state_lock:
            thread = self._require_thread(thread_id)
            if title is not None:
                normalized_title = str(title).strip() or DEFAULT_TITLE
                thread["title"] = normalized_title
                thread["title_source"] = self._normalize_title_source(title_source) or TITLE_SOURCE_MANUAL
            if provider is not None:
                normalized_provider = str(provider).strip().lower()
                if not normalized_provider:
                    raise ValueError("Provider cannot be empty")
                thread["provider"] = normalized_provider
            if model is not None:
                normalized_model = str(model).strip()
                if not normalized_model:
                    raise ValueError("Model cannot be empty")
                thread["model"] = normalized_model
            thread["updated_at"] = self._timestamp()
            self._persist_locked()
            return self._serialize_thread(thread)

    def delete_thread(self, thread_id: str) -> None:
        """Delete a thread plus any session-index entries and lock pointing at it."""
        with self._state_lock:
            if thread_id not in self._threads:
                raise KeyError(thread_id)
            del self._threads[thread_id]
            # Drop every session mapping that referenced the deleted thread.
            stale_session_keys = [
                session_key
                for session_key, mapped_thread_id in self._session_index.items()
                if mapped_thread_id == thread_id
            ]
            for session_key in stale_session_keys:
                self._session_index.pop(session_key, None)
            self._locks.pop(thread_id, None)
            self._persist_locked()

    def add_message(
        self,
        thread_id: str,
        role: str,
        content: str,
        *,
        parts: list[dict[str, Any]] | None = None,
    ) -> dict[str, Any]:
        """Append a message to a thread and persist; returns the serialized message.

        The first non-command user message on a default-titled thread also
        auto-derives the thread title.
        """
        with self._state_lock:
            thread = self._require_thread(thread_id)
            now = self._timestamp()
            normalized_parts = self._normalize_message_parts(parts)
            message = {
                "id": uuid.uuid4().hex,
                "role": role,
                "content": str(content or ""),
                "created_at": now,
            }
            if normalized_parts:
                message["parts"] = normalized_parts
            thread["messages"].append(message)
            thread["updated_at"] = now
            title_source = self._message_text(message)
            # "/"-prefixed text is treated as a bot command, not title material.
            if (
                role == "user"
                and thread["title"] == DEFAULT_TITLE
                and title_source
                and not title_source.startswith("/")
            ):
                thread["title"] = self._derive_title(title_source)
                thread["title_source"] = TITLE_SOURCE_AUTO
            self._persist_locked()
            return self._serialize_message(message)

    def remove_message(self, thread_id: str, message_id: str) -> None:
        """Remove one message by id; no-op (no persist) if the id is absent.

        NOTE(review): on any successful removal the title is reset to the
        default — even a manually set title — confirm that is intended.
        """
        with self._state_lock:
            thread = self._require_thread(thread_id)
            original_count = len(thread["messages"])
            thread["messages"] = [message for message in thread["messages"] if message["id"] != message_id]
            if len(thread["messages"]) == original_count:
                return
            # updated_at falls back to the last remaining message, else creation time.
            if thread["messages"]:
                thread["updated_at"] = thread["messages"][-1]["created_at"]
            else:
                thread["updated_at"] = thread["created_at"]
            if thread["title"] != DEFAULT_TITLE:
                thread["title"] = DEFAULT_TITLE
                thread["title_source"] = TITLE_SOURCE_DEFAULT
            self._persist_locked()

    def build_history(self, thread_id: str, *, limit: int | None = None) -> list[dict[str, str]]:
        """Return the last *limit* messages as plain ``{"role", "content"}`` dicts.

        User messages flatten their text parts via ``_message_text``; other
        roles use raw ``content``.

        NOTE(review): reads thread state without ``_state_lock`` — confirm.
        """
        thread = self._require_thread(thread_id)
        messages = thread["messages"][-limit:] if limit is not None else thread["messages"]
        return [
            {
                "role": str(message.get("role") or "assistant"),
                "content": self._message_text(message)
                if str(message.get("role") or "").strip().lower() == "user"
                else str(message.get("content") or ""),
            }
            for message in messages
        ]

    def build_agent_history(self, thread_id: str, *, limit: int | None = None) -> list[dict[str, Any]]:
        """Return history shaped for the agent API: user messages with image
        parts become multi-part content (inline data URLs); missing images
        degrade to an explanatory text part. Empty messages are dropped.
        """
        thread = self._require_thread(thread_id)
        messages = thread["messages"][-limit:] if limit is not None else thread["messages"]
        history: list[dict[str, Any]] = []

        for message in messages:
            role = str(message.get("role") or "assistant").strip().lower() or "assistant"
            parts = self._normalize_message_parts(message.get("parts"))

            if role == "user" and parts:
                content_parts: list[dict[str, Any]] = []
                for part in parts:
                    if part["type"] == "input_text":
                        content_parts.append({"type": "input_text", "text": part["text"]})
                        continue

                    try:
                        content_parts.append(
                            {
                                "type": "input_image",
                                "image_url": self._media_store.build_data_url(part["file_id"]),
                                "detail": part.get("detail") or "auto",
                            }
                        )
                    except KeyError:
                        # Media file was deleted; tell the model instead of failing.
                        missing_name = str(part.get("name") or "image")
                        content_parts.append(
                            {
                                "type": "input_text",
                                "text": f"A previously attached image named {missing_name} is no longer available.",
                            }
                        )

                if content_parts:
                    history.append({"role": role, "content": content_parts})
                continue

            content = self._message_text(message) if role == "user" else str(message.get("content") or "")
            if content:
                history.append({"role": role, "content": content})

        return history

    def save_upload(self, *, name: str, mime_type: str, data: bytes, file_id: str | None = None) -> dict[str, Any]:
        """Store raw bytes in the media store and return upload metadata."""
        metadata = self._media_store.save_bytes(data, name=name, mime_type=mime_type, file_id=file_id)
        return self._serialize_upload(metadata)

    def get_upload(self, file_id: str) -> dict[str, Any]:
        """Return serialized metadata for a stored upload."""
        return self._serialize_upload(self._media_store.get_meta(file_id))

    def read_upload(self, file_id: str) -> bytes:
        """Return the raw bytes of a stored upload."""
        return self._media_store.read_bytes(file_id)

    def delete_upload(self, file_id: str) -> None:
        """Delete a stored upload from the media store."""
        self._media_store.delete(file_id)

    def lock(self, thread_id: str) -> asyncio.Lock:
        """Return the per-thread asyncio lock (validates the thread exists).

        NOTE(review): ``self._locks[thread_id]`` presumably auto-creates via a
        defaultdict declared outside this view — verify.
        """
        self._require_thread(thread_id)
        return self._locks[thread_id]

    def get_topic_system_prompt(self, session_key: str) -> dict[str, Any] | None:
        """Return the stored system prompt for a session key, or None if unset/blank."""
        with self._state_lock:
            record = self._topic_system_prompts.get(str(session_key))
        if not isinstance(record, dict):
            return None
        prompt = str(record.get("prompt") or "").strip()
        if not prompt:
            return None
        return {
            "session_key": str(session_key),
            "prompt": prompt,
            "updated_at": str(record.get("updated_at") or ""),
        }

    def set_topic_system_prompt(self, session_key: str, prompt: str) -> dict[str, Any]:
        """Set (and persist) a per-session system prompt; raises ValueError on blanks."""
        normalized_session_key = str(session_key).strip()
        if not normalized_session_key:
            raise ValueError("Session key cannot be empty")
        normalized_prompt = str(prompt or "").strip()
        if not normalized_prompt:
            raise ValueError("System prompt cannot be empty")
        with self._state_lock:
            record = {
                "prompt": normalized_prompt,
                "updated_at": self._timestamp(),
            }
            self._topic_system_prompts[normalized_session_key] = record
            self._persist_locked()
            return {
                "session_key": normalized_session_key,
                "prompt": record["prompt"],
                "updated_at": record["updated_at"],
            }

    def clear_topic_system_prompt(self, session_key: str) -> bool:
        """Remove a per-session system prompt; returns True if one was removed."""
        normalized_session_key = str(session_key).strip()
        if not normalized_session_key:
            return False
        with self._state_lock:
            removed = self._topic_system_prompts.pop(normalized_session_key, None)
            if removed is None:
                return False
            self._persist_locked()
            return True

    def add_usage(self, thread_id: str, usage: dict[str, Any] | None) -> dict[str, Any]:
        """Merge a usage report into a thread's cumulative usage and persist."""
        normalized_usage = self._normalize_usage(usage)
        with self._state_lock:
            thread = self._require_thread(thread_id)
            thread["usage"] = self._merge_usage(thread.get("usage"), normalized_usage)
            thread["updated_at"] = self._timestamp()
            self._persist_locked()
            return self._serialize_usage(thread.get("usage"))

    def get_total_usage(self, thread_id: str) -> dict[str, Any]:
        """Return the cumulative usage for a thread."""
        with self._state_lock:
            thread = self._require_thread(thread_id)
            return self._serialize_usage(thread.get("usage"))

    # ── Project learnings (per-thread) ────────────────────────────────

    # Hard cap on stored per-thread learnings; oldest entries are evicted.
    MAX_PROJECT_LEARNINGS = 30

    def get_project_learnings(self, thread_id: str) -> list[dict[str, Any]]:
        """Return project learnings for a thread."""
        with self._state_lock:
            thread = self._require_thread(thread_id)
            return list(thread.get("project_learnings") or [])

    def add_project_learning(self, thread_id: str, fact: str, *, category: str = "general") -> None:
        """Add a project-scoped learning to a thread."""
        fact = str(fact or "").strip()
        if not fact:
            return
        with self._state_lock:
            thread = self._require_thread(thread_id)
            learnings: list[dict[str, Any]] = list(thread.get("project_learnings") or [])
            # Skip near-duplicates (same fact text ignoring case/whitespace)
            normalized = fact.lower().strip()
            for existing in learnings:
                if existing.get("fact", "").lower().strip() == normalized:
                    return
            learnings.append({"fact": fact, "category": str(category or "general").strip(), "updated_at": self._timestamp()})
            # Cap size — keep most recent
            if len(learnings) > self.MAX_PROJECT_LEARNINGS:
                learnings = learnings[-self.MAX_PROJECT_LEARNINGS:]
            thread["project_learnings"] = learnings
            thread["updated_at"] = self._timestamp()
            self._persist_locked()

    # ── Global learnings (user preferences, cross-project) ────────────

    # Hard cap on stored global learnings; oldest entries are evicted.
    MAX_GLOBAL_LEARNINGS = 40

    def get_global_learnings(self) -> list[dict[str, Any]]:
        """Return all global (cross-project) learnings."""
        with self._state_lock:
            return list(self._global_learnings)

    def add_global_learning(self, fact: str, *, category: str = "general") -> None:
        """Add a global learning (user preference, personality, contacts)."""
        fact = str(fact or "").strip()
        if not fact:
            return
        with self._state_lock:
            # Case/whitespace-insensitive dedupe against existing facts.
            normalized = fact.lower().strip()
            for existing in self._global_learnings:
                if existing.get("fact", "").lower().strip() == normalized:
                    return
            self._global_learnings.append(
                {"fact": fact, "category": str(category or "general").strip(), "updated_at": self._timestamp()}
            )
            if len(self._global_learnings) > self.MAX_GLOBAL_LEARNINGS:
                self._global_learnings = self._global_learnings[-self.MAX_GLOBAL_LEARNINGS:]
            self._persist_locked()

    def _build_thread(
        self,
        *,
        title: str,
        model: str,
        provider: str,
        source: str,
        session_label: str | None,
    ) -> dict[str, Any]:
        """Construct (but do not register/persist) a new thread record."""
        now = self._timestamp()
        normalized_session_label = str(session_label).strip() if session_label else ""
        thread = {
            "id": uuid.uuid4().hex,
            "title": str(title).strip() or DEFAULT_TITLE,
            # A caller-supplied non-default title counts as manually set.
            "title_source": TITLE_SOURCE_DEFAULT
            if (str(title).strip() or DEFAULT_TITLE) == DEFAULT_TITLE
            else TITLE_SOURCE_MANUAL,
            "provider": str(provider).strip().lower() or DEFAULT_PROVIDER,
            "model": str(model).strip() or DEFAULT_MODEL,
            "source": str(source).strip().lower() or DEFAULT_SOURCE,
            "created_at": now,
            "updated_at": now,
            "messages": [],
            "usage": self._serialize_usage(None),
        }
        if normalized_session_label:
            thread["session_label"] = normalized_session_label
        return thread

    def _normalize_usage(self, usage: dict[str, Any] | None) -> dict[str, Any]:
        """Coerce a raw usage dict into clean ints / Decimal-strings.

        Unparseable or empty fields are silently dropped; returns {} for
        non-dict input.
        """
        if not isinstance(usage, dict):
            return {}

        token_fields = (
            "prompt_tokens",
            "completion_tokens",
            "total_tokens",
            "reasoning_tokens",
            "cached_tokens",
            "web_search_requests",
            "request_count",
        )
        money_fields = ("cost_usd",)
        text_fields = ("pricing_source",)

        normalized: dict[str, Any] = {}
        for field in token_fields:
            value = usage.get(field)
            if value in (None, ""):
                continue
            try:
                normalized[field] = int(value)
            except (TypeError, ValueError):
                continue

        for field in money_fields:
            value = usage.get(field)
            if value in (None, ""):
                continue
            try:
                # Store money as a plain (non-scientific) decimal string.
                normalized[field] = format(Decimal(str(value)), "f")
            except Exception:
                continue

        for field in text_fields:
            value = usage.get(field)
            if value not in (None, ""):
                normalized[field] = str(value)

        breakdown = usage.get("cost_breakdown")
        if isinstance(breakdown, dict):
            clean_breakdown: dict[str, str] = {}
            for key, value in breakdown.items():
                if value in (None, ""):
                    continue
                try:
                    clean_breakdown[str(key)] = format(Decimal(str(value)), "f")
                except Exception:
                    continue
            if clean_breakdown:
                normalized["cost_breakdown"] = clean_breakdown

        return normalized

    def _merge_usage(self, existing: Any, incoming: dict[str, Any]) -> dict[str, Any]:
        """Sum *incoming* usage into *existing* (tokens, cost, cost breakdown).

        ``pricing_source`` prefers the incoming value when present.
        """
        current = self._normalize_usage(existing if isinstance(existing, dict) else {})
        if not incoming:
            return self._serialize_usage(current)

        for field in (
            "prompt_tokens",
            "completion_tokens",
            "total_tokens",
            "reasoning_tokens",
            "cached_tokens",
            "web_search_requests",
            "request_count",
        ):
            current[field] = int(current.get(field, 0) or 0) + int(incoming.get(field, 0) or 0)

        # Decimal arithmetic avoids float rounding on money values.
        current_cost = Decimal(str(current.get("cost_usd", "0") or "0"))
        incoming_cost = Decimal(str(incoming.get("cost_usd", "0") or "0"))
        current["cost_usd"] = format(current_cost + incoming_cost, "f")

        existing_breakdown = current.get("cost_breakdown") if isinstance(current.get("cost_breakdown"), dict) else {}
        incoming_breakdown = incoming.get("cost_breakdown") if isinstance(incoming.get("cost_breakdown"), dict) else {}
        merged_breakdown: dict[str, str] = {}
        for key in set(existing_breakdown) | set(incoming_breakdown):
            total = Decimal(str(existing_breakdown.get(key, "0") or "0")) + Decimal(
                str(incoming_breakdown.get(key, "0") or "0")
            )
            if total:
                merged_breakdown[key] = format(total, "f")
        if merged_breakdown:
            current["cost_breakdown"] = merged_breakdown
        else:
            current.pop("cost_breakdown", None)

        pricing_source = incoming.get("pricing_source") or current.get("pricing_source")
        if pricing_source:
            current["pricing_source"] = str(pricing_source)

        return self._serialize_usage(current)

    def _serialize_usage(self, usage: Any) -> dict[str, Any]:
        """Normalize usage and fill every expected field with a zero default."""
        normalized = self._normalize_usage(usage if isinstance(usage, dict) else {})
        normalized.setdefault("prompt_tokens", 0)
        normalized.setdefault("completion_tokens", 0)
        normalized.setdefault("total_tokens", normalized["prompt_tokens"] + normalized["completion_tokens"])
        normalized.setdefault("reasoning_tokens", 0)
        normalized.setdefault("cached_tokens", 0)
        normalized.setdefault("web_search_requests", 0)
        normalized.setdefault("request_count", 0)
        normalized.setdefault("cost_usd", "0")
        normalized.setdefault("cost_breakdown", {})
        return normalized

    def _serialize_thread(self, thread: dict[str, Any], *, include_messages: bool = True) -> dict[str, Any]:
        """Build the public dict view of a thread (optionally with messages)."""
        data = {
            "id": thread["id"],
            "title": thread["title"],
            "title_source": self._normalize_title_source(thread.get("title_source")) or TITLE_SOURCE_DEFAULT,
            "provider": thread.get("provider") or DEFAULT_PROVIDER,
            "model": thread["model"],
            "source": thread.get("source") or DEFAULT_SOURCE,
            "created_at": thread["created_at"],
            "updated_at": thread["updated_at"],
            "message_count": len(thread["messages"]),
            "usage": self._serialize_usage(thread.get("usage")),
        }
        if thread.get("session_label"):
            data["session_label"] = thread["session_label"]
        if thread.get("project_learnings"):
            data["project_learnings"] = list(thread["project_learnings"])
        if include_messages:
            data["messages"] = [self._serialize_message(message) for message in thread["messages"]]
        return data

    def _require_thread(self, thread_id: str) -> dict[str, Any]:
        """Return the raw thread record or raise KeyError."""
        if thread_id not in self._threads:
            raise KeyError(thread_id)
        return self._threads[thread_id]

    def _get_session_thread_record(self, session_key: str) -> dict[str, Any] | None:
        """Resolve a session key to its thread; prunes (and persists) a stale mapping.

        Caller must hold ``_state_lock`` (this calls ``_persist_locked``).
        """
        thread_id = self._session_index.get(session_key)
        if not thread_id:
            return None
        thread = self._threads.get(thread_id)
        if thread is None:
            self._session_index.pop(session_key, None)
            self._persist_locked()
            return None
        return thread

    def is_tg_update_seen(self, update_id: int) -> bool:
        """Return True if this Telegram update_id has already been processed."""
        return update_id <= self._last_tg_update_id

    def mark_tg_update(self, update_id: int) -> None:
        """Record a Telegram update_id as processed."""
        with self._state_lock:
            if update_id > self._last_tg_update_id:
                self._last_tg_update_id = update_id
                self._persist_locked()

    def _persist_locked(self) -> None:
        """Write the whole store state to disk as JSON.

        Caller must hold ``_state_lock``.
        NOTE(review): the write is not atomic (no temp-file + rename) — a
        crash mid-write could corrupt the state file.
        """
        self._file_path.parent.mkdir(parents=True, exist_ok=True)
        payload = {
            "threads": self._threads,
            "session_index": self._session_index,
            "last_tg_update_id": self._last_tg_update_id,
            "topic_system_prompts": self._topic_system_prompts,
            "global_learnings": self._global_learnings,
        }
        self._file_path.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    def _load(self) -> None:
        """Load state from disk; silently keeps defaults on missing/corrupt file."""
        if not self._file_path.exists():
            return
        try:
            payload = json.loads(self._file_path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            return
        self._threads = payload.get("threads") or {}
        self._session_index = payload.get("session_index") or {}
        self._last_tg_update_id = int(payload.get("last_tg_update_id") or 0)
        raw_prompts = payload.get("topic_system_prompts") or {}
        self._topic_system_prompts = raw_prompts if isinstance(raw_prompts, dict) else {}
        raw_learnings = payload.get("global_learnings") or []
        self._global_learnings = raw_learnings if isinstance(raw_learnings, list) else []

    def _normalize_threads(self) -> None:
        """Repair loaded state in place (topic prompts + thread fields); persist if changed."""
        changed = False
        cleaned_topic_prompts: dict[str, dict[str, Any]] = {}
        for session_key, raw_record in self._topic_system_prompts.items():
            if not isinstance(raw_record, dict):
                changed = True
                continue
            normalized_session_key = str(session_key).strip()
            normalized_prompt = str(raw_record.get("prompt") or "").strip()
            if not normalized_session_key or not normalized_prompt:
                changed = True
                continue
            normalized_record = {
                "prompt": normalized_prompt,
                "updated_at": str(raw_record.get("updated_at") or self._timestamp()),
            }
            cleaned_topic_prompts[normalized_session_key] = normalized_record
            if normalized_session_key != session_key or raw_record != normalized_record:
                changed = True
        if cleaned_topic_prompts != self._topic_system_prompts:
            self._topic_system_prompts = cleaned_topic_prompts
            changed = True
        for thread in self._threads.values():
            if not isinstance(thread.get("messages"), list):
                thread["messages"] = []
                changed = True
            thread["title"] = str(thread.get("title") or DEFAULT_TITLE).strip() or DEFAULT_TITLE
            normalized_title_source = self._normalize_title_source(thread.get("title_source"))
            expected_title_source = normalized_title_source
            if expected_title_source is None:
                expected_title_source = (
                    TITLE_SOURCE_DEFAULT if thread["title"] == DEFAULT_TITLE else TITLE_SOURCE_MANUAL
                )
            if thread.get("title_source") != expected_title_source:
                thread["title_source"] = expected_title_source
                changed = True
            provider = str(thread.get("provider") or DEFAULT_PROVIDER).strip().lower() or DEFAULT_PROVIDER
            if thread.get("provider") != provider:
                thread["provider"] = provider
                changed = True
            source = str(thread.get("source") or DEFAULT_SOURCE).strip().lower() or DEFAULT_SOURCE
            if thread.get("source") != source:
                thread["source"] = source
                changed = True
            usage = self._serialize_usage(thread.get("usage"))
            if thread.get("usage") != usage:
                thread["usage"] = usage
                changed = True
        if changed:
            self._persist_locked()

    def _serialize_message(self, message: dict[str, Any]) -> dict[str, Any]:
        """Build the public dict view of a message (parts only when present)."""
        serialized = {
            "id": message["id"],
            "role": message["role"],
            "content": message["content"],
            "created_at": message["created_at"],
        }
        normalized_parts = self._normalize_message_parts(message.get("parts"))
        if normalized_parts:
            serialized["parts"] = normalized_parts
        return serialized

    def _serialize_upload(self, metadata: dict[str, Any]) -> dict[str, Any]:
        """Build the public dict view of an upload; preview URL only for images."""
        return {
            "id": str(metadata.get("id") or ""),
            "name": str(metadata.get("name") or "upload"),
            "mime_type": str(metadata.get("mime_type") or "application/octet-stream"),
            "size": int(metadata.get("size") or 0),
            "preview_url": f"/uploads/{metadata.get('id')}"
            if str(metadata.get("mime_type") or "").startswith("image/")
            else None,
        }

    def _normalize_message_parts(self, parts: Any) -> list[dict[str, Any]]:
        """Validate/clean message parts; keeps only well-formed input_text /
        input_image entries and drops everything else."""
        if not isinstance(parts, list):
            return []

        normalized_parts: list[dict[str, Any]] = []
        for part in parts:
            if not isinstance(part, dict):
                continue
            part_type = str(part.get("type") or "").strip()
            if part_type == "input_text":
                text = str(part.get("text") or "")
                if text:
                    normalized_parts.append({"type": "input_text", "text": text})
                continue
            if part_type == "input_image":
                file_id = str(part.get("file_id") or "").strip()
                if not file_id:
                    continue
                normalized_part = {"type": "input_image", "file_id": file_id}
                if part.get("name"):
                    normalized_part["name"] = str(part.get("name"))
                if part.get("mime_type"):
                    normalized_part["mime_type"] = str(part.get("mime_type"))
                if part.get("detail"):
                    normalized_part["detail"] = str(part.get("detail"))
                normalized_parts.append(normalized_part)
        return normalized_parts

    def _message_text(self, message: dict[str, Any]) -> str:
        """Return a message's text: joined input_text parts, else raw content."""
        parts = self._normalize_message_parts(message.get("parts"))
        if parts:
            texts = [str(part.get("text") or "") for part in parts if part.get("type") == "input_text"]
            text = "\n".join(part for part in texts if part).strip()
            if text:
                return text
        return str(message.get("content") or "").strip()

    def _derive_title(self, content: str) -> str:
        """Collapse whitespace and trim to at most 48 chars (ellipsized)."""
        single_line = " ".join(str(content).split())
        if len(single_line) <= 48:
            return single_line
        return single_line[:45].rstrip() + "..."
+ + def _normalize_title_source(self, raw_value: Any) -> str | None: + value = str(raw_value or "").strip().lower() + if value in KNOWN_TITLE_SOURCES: + return value + return None + + def _timestamp(self) -> str: + from datetime import datetime, timezone + + return datetime.now(timezone.utc).isoformat()