first commit

This commit is contained in:
Tanmay Karande
2026-02-13 23:56:09 -05:00
commit ec8bd80a3d
27 changed files with 6725 additions and 0 deletions

34
.env.example Normal file
View File

@@ -0,0 +1,34 @@
# =============================================================================
# Aetheel Configuration
# =============================================================================
# Copy this file to .env and fill in your values.
# See docs/slack-setup.md and docs/opencode-setup.md for instructions.
# --- Slack Tokens (required) ------------------------------------------------
# Get these from https://api.slack.com/apps → your app settings
SLACK_BOT_TOKEN=xoxb-your-bot-token-here
SLACK_APP_TOKEN=xapp-your-app-token-here
# --- OpenCode Runtime (required for AI) -------------------------------------
# Mode: "cli" (subprocess) or "sdk" (opencode serve API)
OPENCODE_MODE=cli
# Model to use (optional — uses your OpenCode default if not set)
# Examples:
# anthropic/claude-sonnet-4-20250514
# openai/gpt-5.1
# google/gemini-3-pro
# OPENCODE_MODEL=
# CLI timeout in seconds (for CLI mode)
OPENCODE_TIMEOUT=120
# Server URL for SDK mode (only needed if OPENCODE_MODE=sdk)
# OPENCODE_SERVER_URL=http://localhost:4096
# OPENCODE_SERVER_PASSWORD=
# Working directory for OpenCode (optional — defaults to current directory)
# OPENCODE_WORKSPACE=/path/to/your/project
# --- Logging -----------------------------------------------------------------
LOG_LEVEL=INFO

21
.gitignore vendored Normal file
View File

@@ -0,0 +1,21 @@
# Environment & secrets
.env
*.env.local
# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info
inspiration/
# Virtual environments
.venv
# OS
.DS_Store
# Logs
*.log

1
.python-version Normal file
View File

@@ -0,0 +1 @@
3.14

0
README.md Normal file
View File

2
adapters/__init__.py Normal file
View File

@@ -0,0 +1,2 @@
# Aetheel Adapters
# Channel adapters for connecting the AI agent to messaging platforms.

497
adapters/slack_adapter.py Normal file
View File

@@ -0,0 +1,497 @@
"""
Aetheel Slack Adapter
=====================
Connects to Slack via Socket Mode (no public URL needed).
Inspired by OpenClaw's Slack implementation (src/slack/).
Features:
- Socket Mode connection (no public URL / webhook needed)
- Receives DMs and @mentions in channels
- Each thread = persistent conversation context
- Sends replies back to the same thread
- Message handler callback for plugging in AI logic
Architecture (from OpenClaw):
- OpenClaw uses @slack/bolt (Node.js) with socketMode: true
- We replicate this with slack_bolt (Python) which is the official Python SDK
- Like OpenClaw, we separate: token resolution, message handling, and sending
Usage:
from adapters.slack_adapter import SlackAdapter
adapter = SlackAdapter()
adapter.on_message(my_handler)
adapter.start()
"""
import logging
import os
import re
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Callable
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
logger = logging.getLogger("aetheel.slack")
# ---------------------------------------------------------------------------
# Types
# ---------------------------------------------------------------------------
@dataclass
class SlackMessage:
"""Represents an incoming Slack message (mirrors OpenClaw's SlackMessageEvent)."""
text: str
user_id: str
user_name: str
channel_id: str
channel_name: str
thread_ts: str | None
message_ts: str
is_dm: bool
is_mention: bool
is_thread_reply: bool
raw_event: dict = field(default_factory=dict, repr=False)
@property
def conversation_id(self) -> str:
"""
Unique conversation identifier.
Uses thread_ts if in a thread, otherwise the message_ts.
This mirrors OpenClaw's session isolation per thread.
"""
return self.thread_ts or self.message_ts
@property
def timestamp(self) -> datetime:
"""Parse Slack ts into a datetime."""
ts_float = float(self.message_ts)
return datetime.fromtimestamp(ts_float, tz=timezone.utc)
@dataclass
class SlackSendResult:
"""Result of sending a Slack message (mirrors OpenClaw's SlackSendResult)."""
message_id: str
channel_id: str
thread_ts: str | None = None
# Type alias for the message handler callback: receives the parsed
# SlackMessage and returns the reply text, or None to send no reply.
MessageHandler = Callable[[SlackMessage], str | None]
# ---------------------------------------------------------------------------
# Token Resolution (inspired by OpenClaw src/slack/token.ts)
# ---------------------------------------------------------------------------
def resolve_bot_token(explicit: str | None = None) -> str:
"""
Resolve the Slack bot token.
Priority: explicit param > SLACK_BOT_TOKEN env var.
"""
token = (explicit or os.environ.get("SLACK_BOT_TOKEN", "")).strip()
if not token:
raise ValueError(
"Slack bot token is required. "
"Set SLACK_BOT_TOKEN environment variable or pass it explicitly."
)
if not token.startswith("xoxb-"):
logger.warning("Bot token doesn't start with 'xoxb-' — double-check the token.")
return token
def resolve_app_token(explicit: str | None = None) -> str:
"""
Resolve the Slack app-level token (required for Socket Mode).
Priority: explicit param > SLACK_APP_TOKEN env var.
"""
token = (explicit or os.environ.get("SLACK_APP_TOKEN", "")).strip()
if not token:
raise ValueError(
"Slack app-level token is required for Socket Mode. "
"Set SLACK_APP_TOKEN environment variable or pass it explicitly."
)
if not token.startswith("xapp-"):
logger.warning("App token doesn't start with 'xapp-' — double-check the token.")
return token
# ---------------------------------------------------------------------------
# Slack Adapter
# ---------------------------------------------------------------------------
class SlackAdapter:
    """
    Slack adapter using Socket Mode.

    Inspired by OpenClaw's monitorSlackProvider() in src/slack/monitor/provider.ts:
    - Connects via Socket Mode (no public URL needed)
    - Handles DMs and @mentions
    - Thread-based conversation isolation
    - Configurable message handler callback

    Example:
        adapter = SlackAdapter()

        @adapter.on_message
        def handle(msg: SlackMessage) -> str:
            return f"Echo: {msg.text}"

        adapter.start()
    """

    def __init__(
        self,
        bot_token: str | None = None,
        app_token: str | None = None,
        log_level: str = "INFO",
    ):
        # Token resolution raises ValueError early if either token is missing.
        self._bot_token = resolve_bot_token(bot_token)
        self._app_token = resolve_app_token(app_token)
        self._message_handlers: list[MessageHandler] = []
        # Bot identity — filled in by _resolve_identity() via auth.test.
        self._bot_user_id: str = ""
        self._bot_user_name: str = ""
        self._team_id: str = ""
        # ID -> name caches to avoid repeated users.info / conversations.info calls.
        self._user_cache: dict[str, str] = {}
        self._channel_cache: dict[str, str] = {}
        self._running = False
        self._socket_handler: SocketModeHandler | None = None
        # Configure logging
        # NOTE(review): basicConfig configures the ROOT logger, affecting the
        # whole process — confirm this is intended for a library-style adapter.
        logging.basicConfig(
            level=getattr(logging, log_level.upper(), logging.INFO),
            format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
        # Initialize Slack Bolt app with Socket Mode
        # This mirrors OpenClaw's: new App({ token: botToken, appToken, socketMode: true })
        self._app = App(
            token=self._bot_token,
            logger=logger,
        )
        self._client: WebClient = self._app.client
        # Register event handlers
        self._register_handlers()

    # -------------------------------------------------------------------
    # Public API
    # -------------------------------------------------------------------
    def on_message(self, handler: MessageHandler) -> MessageHandler:
        """
        Register a message handler (can be used as a decorator).

        The handler receives a SlackMessage and should return a response
        string or None. The handler itself is returned so the decorator
        form leaves the function usable.
        """
        self._message_handlers.append(handler)
        return handler

    def start(self) -> None:
        """
        Start the Slack adapter in Socket Mode.

        This is a blocking call (like OpenClaw's `await app.start()`);
        Ctrl-C triggers a graceful stop().
        """
        # Resolve bot identity (like OpenClaw's auth.test call)
        self._resolve_identity()
        logger.info("=" * 60)
        logger.info("  Aetheel Slack Adapter")
        logger.info("=" * 60)
        logger.info(f"  Bot: @{self._bot_user_name} ({self._bot_user_id})")
        logger.info(f"  Team: {self._team_id}")
        logger.info(f"  Mode: Socket Mode")
        logger.info(f"  Handlers: {len(self._message_handlers)} registered")
        logger.info("=" * 60)
        self._running = True
        self._socket_handler = SocketModeHandler(self._app, self._app_token)
        try:
            self._socket_handler.start()
        except KeyboardInterrupt:
            logger.info("Shutting down...")
            self.stop()

    def start_async(self) -> None:
        """Start the adapter in a background thread (non-blocking)."""
        self._resolve_identity()
        self._running = True
        self._socket_handler = SocketModeHandler(self._app, self._app_token)
        # connect() (unlike start()) returns immediately, leaving the socket
        # running on Bolt's internal thread.
        self._socket_handler.connect()
        logger.info(
            f"Slack adapter connected (bot=@{self._bot_user_name}, mode=socket)"
        )

    def stop(self) -> None:
        """Stop the Slack adapter gracefully (safe to call more than once)."""
        self._running = False
        if self._socket_handler:
            try:
                self._socket_handler.close()
            except Exception:
                # Best-effort shutdown — the socket may already be closed.
                pass
        logger.info("Slack adapter stopped.")

    def send_message(
        self,
        channel: str,
        text: str,
        thread_ts: str | None = None,
    ) -> SlackSendResult:
        """
        Send a message to a Slack channel or DM.

        Mirrors OpenClaw's sendMessageSlack() in src/slack/send.ts.

        Args:
            channel: Channel ID (C...), user ID (U...), or DM channel (D...)
            text: Message text (supports Slack mrkdwn formatting)
            thread_ts: Optional thread timestamp to reply in a thread

        Returns:
            SlackSendResult with message_id and channel_id

        Raises:
            ValueError: if text is empty/whitespace.
            RuntimeError: if a DM channel could not be opened.
            SlackApiError: if posting a chunk fails.
        """
        if not text.strip():
            raise ValueError("Cannot send an empty message.")
        # If it looks like a user ID, open a DM first
        # (mirrors OpenClaw's resolveChannelId)
        if channel.startswith("U") or channel.startswith("W"):
            try:
                dm_response = self._client.conversations_open(users=[channel])
                channel = dm_response["channel"]["id"]
            except SlackApiError as e:
                raise RuntimeError(f"Failed to open DM with {channel}: {e}") from e
        # Chunk long messages (OpenClaw's SLACK_TEXT_LIMIT = 4000)
        SLACK_TEXT_LIMIT = 4000
        chunks = self._chunk_text(text, SLACK_TEXT_LIMIT)
        last_ts = ""
        for chunk in chunks:
            try:
                response = self._client.chat_postMessage(
                    channel=channel,
                    text=chunk,
                    thread_ts=thread_ts,
                )
                last_ts = response.get("ts", "")
            except SlackApiError as e:
                logger.error(f"Failed to send message: {e}")
                raise
        return SlackSendResult(
            message_id=last_ts or "unknown",
            channel_id=channel,
            thread_ts=thread_ts,
        )

    # -------------------------------------------------------------------
    # Internal: Event handlers
    # -------------------------------------------------------------------
    def _register_handlers(self) -> None:
        """Register Slack event handlers on the Bolt app."""
        # Handle direct messages and channel messages WITHOUT @mention
        # Note: When someone @mentions the bot, Slack fires BOTH a "message"
        # event and an "app_mention" event. We only want to respond once,
        # so the message handler skips messages containing the bot @mention.
        # Those are handled exclusively by handle_mention_event below.
        @self._app.event("message")
        def handle_message_event(event: dict, say: Callable, client: WebClient) -> None:
            # Skip if this message contains an @mention of our bot
            # (it will be handled by app_mention instead)
            raw_text = event.get("text", "")
            if self._bot_user_id and f"<@{self._bot_user_id}>" in raw_text:
                return
            self._process_incoming(event, say, client)

        # Handle @mentions in channels
        @self._app.event("app_mention")
        def handle_mention_event(event: dict, say: Callable, client: WebClient) -> None:
            self._process_incoming(event, say, client, is_mention=True)

    def _process_incoming(
        self,
        event: dict,
        say: Callable,
        client: WebClient,
        is_mention: bool = False,
    ) -> None:
        """
        Process an incoming Slack message.

        This is the core handler, inspired by OpenClaw's
        createSlackMessageHandler(): filter bot/system events, normalize
        the payload into a SlackMessage, run every registered handler, and
        post any non-empty handler result back into the same thread.
        """
        # Skip bot messages (including our own)
        if event.get("bot_id") or event.get("subtype") in (
            "bot_message",
            "message_changed",
            "message_deleted",
            "channel_join",
            "channel_leave",
        ):
            return
        user_id = event.get("user", "")
        if not user_id or user_id == self._bot_user_id:
            return
        raw_text = event.get("text", "")
        channel_id = event.get("channel", "")
        channel_type = event.get("channel_type", "")
        thread_ts = event.get("thread_ts")
        message_ts = event.get("ts", "")
        # "im" = direct message, "mpim" = group DM.
        is_dm = channel_type in ("im", "mpim")
        # Strip the bot mention from the text (like OpenClaw does)
        clean_text = self._strip_mention(raw_text).strip()
        if not clean_text:
            return
        # Resolve user and channel names
        user_name = self._resolve_user_name(user_id, client)
        channel_name = self._resolve_channel_name(channel_id, client)
        # Build the SlackMessage object
        msg = SlackMessage(
            text=clean_text,
            user_id=user_id,
            user_name=user_name,
            channel_id=channel_id,
            channel_name=channel_name,
            thread_ts=thread_ts,
            message_ts=message_ts,
            is_dm=is_dm,
            is_mention=is_mention,
            is_thread_reply=thread_ts is not None,
            raw_event=event,
        )
        logger.info(
            f"📨 Message from @{user_name} in #{channel_name}: {clean_text[:100]}"
        )
        # Call all registered handlers
        for handler in self._message_handlers:
            try:
                response = handler(msg)
                if response:
                    # Reply in the same thread
                    # (OpenClaw uses thread_ts for thread isolation)
                    reply_thread = thread_ts or message_ts
                    say(text=response, thread_ts=reply_thread)
                    logger.info(
                        f"📤 Reply sent to #{channel_name} (thread={reply_thread[:10]}...)"
                    )
            except Exception as e:
                # One failing handler must not break the others; tell the
                # user something went wrong, best-effort.
                logger.error(f"Handler error: {e}", exc_info=True)
                try:
                    say(
                        text=f"⚠️ Something went wrong processing your message.",
                        thread_ts=thread_ts or message_ts,
                    )
                except Exception:
                    pass

    # -------------------------------------------------------------------
    # Internal: Helpers
    # -------------------------------------------------------------------
    def _resolve_identity(self) -> None:
        """
        Resolve bot identity via auth.test.

        Mirrors OpenClaw's auth.test call in monitorSlackProvider().
        Failure is non-fatal: the adapter still runs, but self-message
        filtering and mention stripping will be weaker.
        """
        try:
            auth = self._client.auth_test()
            self._bot_user_id = auth.get("user_id", "")
            self._bot_user_name = auth.get("user", "unknown")
            self._team_id = auth.get("team_id", "")
            logger.info(
                f"Auth OK: bot={self._bot_user_name} team={self._team_id}"
            )
        except SlackApiError as e:
            logger.warning(f"auth.test failed (non-fatal): {e}")

    def _strip_mention(self, text: str) -> str:
        """Remove @bot mentions (<@BOTID>) from message text."""
        if self._bot_user_id:
            text = re.sub(rf"<@{re.escape(self._bot_user_id)}>", "", text)
        return text.strip()

    def _resolve_user_name(self, user_id: str, client: WebClient) -> str:
        """Resolve a user ID to a display name (with caching).

        On API failure the ID itself is cached so we don't retry every message.
        """
        if user_id in self._user_cache:
            return self._user_cache[user_id]
        try:
            info = client.users_info(user=user_id)
            name = (
                info["user"].get("real_name")
                or info["user"].get("name")
                or user_id
            )
            self._user_cache[user_id] = name
            return name
        except SlackApiError:
            self._user_cache[user_id] = user_id
            return user_id

    def _resolve_channel_name(self, channel_id: str, client: WebClient) -> str:
        """Resolve a channel ID to a name (with caching; falls back to the ID)."""
        if channel_id in self._channel_cache:
            return self._channel_cache[channel_id]
        try:
            info = client.conversations_info(channel=channel_id)
            name = info["channel"].get("name", channel_id)
            self._channel_cache[channel_id] = name
            return name
        except SlackApiError:
            self._channel_cache[channel_id] = channel_id
            return channel_id

    @staticmethod
    def _chunk_text(text: str, limit: int = 4000) -> list[str]:
        """
        Split text into chunks respecting Slack's character limit.

        Mirrors OpenClaw's chunkMarkdownTextWithMode().
        Tries to split at newlines, then at spaces, then hard-splits.
        A break point is only used if it lands past the halfway mark, so
        chunks never get pathologically small.
        """
        if len(text) <= limit:
            return [text]
        chunks = []
        remaining = text
        while remaining:
            if len(remaining) <= limit:
                chunks.append(remaining)
                break
            # Try to find a good break point
            cut = limit
            # Prefer breaking at a newline
            newline_pos = remaining.rfind("\n", 0, limit)
            if newline_pos > limit // 2:
                cut = newline_pos + 1
            else:
                # Fall back to space
                space_pos = remaining.rfind(" ", 0, limit)
                if space_pos > limit // 2:
                    cut = space_pos + 1
            chunks.append(remaining[:cut])
            remaining = remaining[cut:]
        return chunks

1
agent/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Agent runtime module — wraps OpenCode CLI as the AI brain for Aetheel.

419
agent/claude_runtime.py Normal file
View File

@@ -0,0 +1,419 @@
"""
Claude Code Agent Runtime
=========================
Wraps the Claude Code CLI as an alternative AI brain for Aetheel.
Like OpenCodeRuntime, this provides a subprocess-based interface to an
AI coding agent. Claude Code uses the `claude` CLI from Anthropic.
Usage:
from agent.claude_runtime import ClaudeCodeRuntime, ClaudeCodeConfig
runtime = ClaudeCodeRuntime()
response = runtime.chat("What is Python?", conversation_id="slack-thread-123")
"""
import json
import logging
import os
import shutil
import subprocess
import time
from dataclasses import dataclass, field
# Re-use AgentResponse and SessionStore from opencode_runtime
from agent.opencode_runtime import AgentResponse, SessionStore
logger = logging.getLogger("aetheel.agent.claude")
# ---------------------------------------------------------------------------
# CLI Resolution
# ---------------------------------------------------------------------------
def _resolve_claude_command(explicit: str | None = None) -> str:
"""
Resolve the claude binary path.
Checks common install locations since subprocesses may not
have the same PATH as the user's shell.
"""
if explicit:
resolved = shutil.which(explicit)
if resolved:
return resolved
return explicit # Let subprocess fail with a clear error
# 1. Try PATH first
found = shutil.which("claude")
if found:
return found
# 2. Common install locations
home = os.path.expanduser("~")
candidates = [
os.path.join(home, ".claude", "bin", "claude"),
os.path.join(home, ".local", "bin", "claude"),
"/usr/local/bin/claude",
os.path.join(home, ".npm-global", "bin", "claude"),
]
for path in candidates:
if os.path.isfile(path) and os.access(path, os.X_OK):
return path
return "claude" # Fallback — will error at runtime if not found
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
@dataclass
class ClaudeCodeConfig:
"""Configuration for the Claude Code runtime."""
command: str = ""
model: str | None = None # e.g., "claude-sonnet-4-20250514"
timeout_seconds: int = 120
max_turns: int = 3 # Limit tool use turns for faster responses
workspace_dir: str | None = None
system_prompt: str | None = None
session_ttl_hours: int = 24
# claude -p flags
output_format: str = "json" # "json", "text", or "stream-json"
# Permission settings
allowed_tools: list[str] = field(default_factory=list)
# Whether to disable all tool use (pure conversation mode)
no_tools: bool = True # Default: no tools for chat responses
@classmethod
def from_env(cls) -> "ClaudeCodeConfig":
"""Create config from environment variables."""
return cls(
command=os.environ.get("CLAUDE_COMMAND", ""),
model=os.environ.get("CLAUDE_MODEL"),
timeout_seconds=int(os.environ.get("CLAUDE_TIMEOUT", "120")),
max_turns=int(os.environ.get("CLAUDE_MAX_TURNS", "3")),
workspace_dir=os.environ.get("CLAUDE_WORKSPACE"),
system_prompt=os.environ.get("CLAUDE_SYSTEM_PROMPT"),
no_tools=os.environ.get("CLAUDE_NO_TOOLS", "true").lower() == "true",
)
# ---------------------------------------------------------------------------
# Claude Code Runtime
# ---------------------------------------------------------------------------
class ClaudeCodeRuntime:
    """
    Claude Code Agent Runtime — alternative AI brain for Aetheel.

    Uses the `claude` CLI in non-interactive mode (`claude -p`).
    Supports:
    - Non-interactive execution with `-p` flag
    - JSON output parsing with `--output-format json`
    - Session continuity with `--continue` and `--session-id`
    - System prompt injection with `--system-prompt`
    - Model selection with `--model`
    - Tool restriction with `--allowedTools` or `--disallowedTools`
    """

    def __init__(self, config: ClaudeCodeConfig | None = None):
        self._config = config or ClaudeCodeConfig()
        # Resolve the binary once up-front so every run uses the same path.
        self._config.command = _resolve_claude_command(self._config.command)
        # Maps external conversation IDs (e.g. Slack thread ts) -> CLI session IDs.
        self._sessions = SessionStore()
        # Validate on init
        self._validate_installation()
        logger.info(
            f"ClaudeCodeRuntime initialized: "
            f"command={self._config.command}, "
            f"model={self._config.model or 'default'}"
        )

    def chat(
        self,
        message: str,
        conversation_id: str | None = None,
        system_prompt: str | None = None,
    ) -> AgentResponse:
        """
        Send a message to Claude Code and get a response.

        This is the main entry point, matching OpenCodeRuntime.chat().
        Never raises: failures are returned as AgentResponse.error, and
        duration_ms is always populated.
        """
        start = time.monotonic()
        try:
            result = self._run_claude(message, conversation_id, system_prompt)
            result.duration_ms = int((time.monotonic() - start) * 1000)
            return result
        except Exception as e:
            logger.error(f"Claude runtime error: {e}", exc_info=True)
            return AgentResponse(
                text="",
                error=str(e),
                duration_ms=int((time.monotonic() - start) * 1000),
            )

    def get_status(self) -> dict:
        """Get the runtime status (for the /status command)."""
        return {
            "mode": "claude-code",
            "model": self._config.model or "default",
            "provider": "anthropic",
            "active_sessions": self._sessions.count,
            "claude_available": self._is_claude_available(),
        }

    def cleanup_sessions(self) -> int:
        """Clean up stale sessions. Returns count removed."""
        return self._sessions.cleanup(self._config.session_ttl_hours)

    # -------------------------------------------------------------------
    # Core: Run claude CLI
    # -------------------------------------------------------------------
    def _run_claude(
        self,
        message: str,
        conversation_id: str | None = None,
        system_prompt: str | None = None,
    ) -> AgentResponse:
        """
        Run `claude -p <message>` and parse the response.

        Claude Code's `-p` flag runs in non-interactive (print) mode:
        - Processes the message
        - Returns the response
        - Exits immediately

        A non-zero exit code or a timeout is reported via
        AgentResponse.error rather than an exception.
        """
        args = self._build_cli_args(message, conversation_id, system_prompt)
        logger.info(
            f"Claude exec: claude -p "
            f"(prompt_chars={len(message)}, "
            f"session={conversation_id or 'new'})"
        )
        try:
            result = subprocess.run(
                args,
                capture_output=True,
                text=True,
                timeout=self._config.timeout_seconds,
                cwd=self._config.workspace_dir or os.getcwd(),
                env=self._build_cli_env(),
            )
            stdout = result.stdout.strip()
            stderr = result.stderr.strip()
            if result.returncode != 0:
                error_text = stderr or stdout or "Claude CLI command failed"
                logger.error(
                    f"Claude failed (code={result.returncode}): {error_text[:200]}"
                )
                return AgentResponse(
                    text="",
                    error=f"Claude Code error: {error_text[:500]}",
                )
            # Parse the output
            response_text, session_id = self._parse_output(stdout)
            if not response_text:
                response_text = stdout  # Fallback to raw output
            # Store session mapping so follow-ups in this conversation resume
            # the same CLI session.
            if session_id and conversation_id:
                self._sessions.set(conversation_id, session_id)
            return AgentResponse(
                text=response_text,
                session_id=session_id,
                model=self._config.model,
                provider="anthropic",
            )
        except subprocess.TimeoutExpired:
            logger.error(
                f"Claude timeout after {self._config.timeout_seconds}s"
            )
            return AgentResponse(
                text="",
                error=f"Request timed out after {self._config.timeout_seconds}s",
            )

    def _build_cli_args(
        self,
        message: str,
        conversation_id: str | None = None,
        system_prompt: str | None = None,
    ) -> list[str]:
        """
        Build CLI arguments for `claude -p`.

        Claude Code CLI flags:
            -p, --print        Non-interactive mode (print and exit)
            --output-format    json | text | stream-json
            --model            Model to use
            --system-prompt    System prompt (claude supports this natively!)
            --continue         Continue the most recent session
            --session-id       Resume a specific session
            --max-turns        Limit agentic turns
            --allowedTools     Restrict tool access
            --disallowedTools  Block specific tools
        """
        args = [self._config.command, "-p"]
        # Model selection
        if self._config.model:
            args.extend(["--model", self._config.model])
        # Output format
        args.extend(["--output-format", self._config.output_format])
        # System prompt — claude supports this natively (unlike opencode)
        prompt = system_prompt or self._config.system_prompt
        if prompt:
            args.extend(["--system-prompt", prompt])
        # Session continuity
        # NOTE(review): passing --session-id together with --continue — confirm
        # against the current claude CLI that this combination resumes the
        # intended session rather than the most recent one.
        existing_session = None
        if conversation_id:
            existing_session = self._sessions.get(conversation_id)
        if existing_session:
            args.extend(["--session-id", existing_session, "--continue"])
        # Max turns for tool use
        if self._config.max_turns:
            args.extend(["--max-turns", str(self._config.max_turns)])
        # Tool restrictions
        if self._config.no_tools:
            # Disable all tools for pure conversation
            # NOTE(review): relies on an empty --allowedTools value meaning
            # "no tools" — verify against the CLI's permission semantics.
            args.extend(["--allowedTools", ""])
        elif self._config.allowed_tools:
            for tool in self._config.allowed_tools:
                args.extend(["--allowedTools", tool])
        # The message (positional arg, must come last)
        args.append(message)
        return args

    def _build_cli_env(self) -> dict[str, str]:
        """Build environment variables for the CLI subprocess.

        Currently a straight copy of the parent environment; hook point for
        future overrides.
        """
        env = os.environ.copy()
        return env

    def _parse_output(self, stdout: str) -> tuple[str, str | None]:
        """
        Parse claude CLI output into (response_text, session_id).

        With --output-format json, claude returns a JSON object:
            {
                "type": "result",
                "subtype": "success",
                "cost_usd": 0.003,
                "is_error": false,
                "duration_ms": 1234,
                "duration_api_ms": 1100,
                "num_turns": 1,
                "result": "The response text...",
                "session_id": "abc123-..."
            }
        With --output-format text, it returns plain text.

        Tries, in order: a single JSON object, JSONL (stream-json) events,
        then plain text as a last resort.
        """
        if not stdout.strip():
            return "", None
        # Try JSON format first
        try:
            data = json.loads(stdout)
            if isinstance(data, dict):
                # Standard JSON response
                text = data.get("result", "")
                session_id = data.get("session_id")
                if data.get("is_error"):
                    error_msg = text or data.get("error", "Unknown error")
                    logger.warning(f"Claude returned error: {error_msg[:200]}")
                    return f"⚠️ {error_msg}", session_id
                return text, session_id
        except json.JSONDecodeError:
            pass
        # Try JSONL (stream-json) format
        text_parts = []
        session_id = None
        for line in stdout.splitlines():
            line = line.strip()
            if not line:
                continue
            try:
                event = json.loads(line)
                if isinstance(event, dict):
                    if event.get("type") == "result":
                        text_parts.append(event.get("result", ""))
                        session_id = event.get("session_id", session_id)
                    elif event.get("type") == "assistant" and "message" in event:
                        # Extract text from content blocks
                        msg = event["message"]
                        if "content" in msg:
                            for block in msg["content"]:
                                if block.get("type") == "text":
                                    text_parts.append(block.get("text", ""))
                        session_id = event.get("session_id", session_id)
            except json.JSONDecodeError:
                # Non-JSON lines are skipped, not fatal.
                continue
        if text_parts:
            return "\n".join(text_parts), session_id
        # Fallback: treat as plain text
        return stdout, None

    # -------------------------------------------------------------------
    # Validation
    # -------------------------------------------------------------------
    def _validate_installation(self) -> None:
        """Check that Claude Code CLI is installed and accessible.

        Logs a warning instead of raising so the runtime can still be
        constructed (errors will surface on the first chat()).
        """
        cmd = self._config.command
        if not cmd:
            logger.warning("Claude Code command is empty")
            return
        resolved = shutil.which(cmd)
        if resolved:
            logger.info(f"Claude Code found: {resolved}")
        else:
            logger.warning(
                f"Claude Code CLI not found at '{cmd}'. "
                "Install with: npm install -g @anthropic-ai/claude-code"
            )

    def _is_claude_available(self) -> bool:
        """Check if Claude Code CLI is available (via `claude --version`)."""
        try:
            result = subprocess.run(
                [self._config.command, "--version"],
                capture_output=True,
                text=True,
                timeout=5,
            )
            return result.returncode == 0
        except Exception:
            return False

868
agent/opencode_runtime.py Normal file
View File

@@ -0,0 +1,868 @@
"""
OpenCode Agent Runtime
======================
Wraps OpenCode CLI as the AI brain for Aetheel, inspired by OpenClaw's cli-runner.ts.
Two modes of operation:
1. **SDK Mode** (preferred) — Connects to a running `opencode serve` instance
via the official Python SDK (`opencode-ai`). Persistent sessions, low latency.
2. **CLI Mode** (fallback) — Spawns `opencode run` as a subprocess for each
request. No persistent server needed, but higher per-request latency.
Architecture (modeled after OpenClaw):
- OpenClaw's `cli-runner.ts` runs CLI agents as subprocesses with configurable
backends (claude-cli, codex-cli, etc.) via `runCommandWithTimeout()`.
- OpenClaw's `cli-backends.ts` defines backend configs with command, args,
output format, model aliases, session handling, etc.
- We replicate this pattern for OpenCode, but leverage OpenCode's `serve` mode
and its Python SDK for a cleaner integration.
Session Management:
- Each Slack thread maps to an OpenCode session (via `conversation_id`).
- Sessions are created on first message and reused for follow-ups.
- This mirrors OpenClaw's session isolation strategy.
Usage:
from agent.opencode_runtime import OpenCodeRuntime
runtime = OpenCodeRuntime(mode="sdk")
response = runtime.chat("What is Python?", session_id="slack-thread-123")
"""
import json
import logging
import os
import shutil
import subprocess
import threading
import time
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Any, Callable
logger = logging.getLogger("aetheel.agent")
def _resolve_opencode_command(explicit: str | None = None) -> str:
"""
Resolve the opencode binary path.
Python subprocesses don't source ~/.zshrc or ~/.bashrc, so paths like
~/.opencode/bin won't be in PATH. This function checks common install
locations to find the binary automatically.
Priority:
1. Explicit path (from OPENCODE_COMMAND env var)
2. shutil.which (already in system PATH)
3. ~/.opencode/bin/opencode (official installer default)
4. ~/.local/bin/opencode (common Linux/macOS location)
5. npm global installs (npx-style locations)
"""
cmd = explicit or "opencode"
# If explicit path is absolute and exists, use it directly
if os.path.isabs(cmd) and os.path.isfile(cmd):
return cmd
# Try system PATH first
found = shutil.which(cmd)
if found:
return found
# Check common install locations
home = Path.home()
candidates = [
home / ".opencode" / "bin" / "opencode", # official installer
home / ".local" / "bin" / "opencode", # common Linux/macOS
Path("/usr/local/bin/opencode"), # Homebrew / manual
Path("/opt/homebrew/bin/opencode"), # Homebrew (Apple Silicon)
]
for candidate in candidates:
if candidate.is_file() and os.access(candidate, os.X_OK):
logger.info(f"Auto-discovered opencode at: {candidate}")
return str(candidate)
# Return the original command (will fail at runtime with a clear error)
return cmd
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
class RuntimeMode(Enum):
    """How the runtime talks to OpenCode."""

    SDK = "sdk"  # via `opencode serve` + the Python SDK
    CLI = "cli"  # via an `opencode run` subprocess
@dataclass
class OpenCodeConfig:
    """
    Configuration for the OpenCode runtime.

    Modeled after OpenClaw's CliBackendConfig in cli-backends.ts.
    """

    # --- Connection ---
    mode: RuntimeMode = RuntimeMode.CLI
    server_url: str = "http://localhost:4096"
    server_password: str | None = None
    server_username: str = "opencode"
    # --- CLI settings (CLI mode, mirroring OpenClaw's DEFAULT_CLAUDE_BACKEND) ---
    command: str = "opencode"
    timeout_seconds: int = 120
    # --- Model ---
    model: str | None = None     # e.g., "anthropic/claude-sonnet-4-20250514"
    provider: str | None = None  # e.g., "anthropic"
    # --- Agent behavior ---
    system_prompt: str | None = None
    workspace_dir: str | None = None
    # output format: "default" (formatted) or "json" (raw events)
    format: str = "json"
    # --- Session ---
    auto_create_sessions: bool = True
    session_ttl_hours: int = 24

    @classmethod
    def from_env(cls) -> "OpenCodeConfig":
        """Build a config from OPENCODE_* environment variables."""
        env = os.environ
        requested = env.get("OPENCODE_MODE", "cli").lower()
        return cls(
            mode=RuntimeMode.SDK if requested == "sdk" else RuntimeMode.CLI,
            server_url=env.get("OPENCODE_SERVER_URL", "http://localhost:4096"),
            server_password=env.get("OPENCODE_SERVER_PASSWORD"),
            server_username=env.get("OPENCODE_SERVER_USERNAME", "opencode"),
            command=_resolve_opencode_command(env.get("OPENCODE_COMMAND")),
            timeout_seconds=int(env.get("OPENCODE_TIMEOUT", "120")),
            model=env.get("OPENCODE_MODEL"),
            provider=env.get("OPENCODE_PROVIDER"),
            system_prompt=env.get("OPENCODE_SYSTEM_PROMPT"),
            workspace_dir=env.get(
                "OPENCODE_WORKSPACE",
                env.get("AETHEEL_WORKSPACE"),
            ),
            format=env.get("OPENCODE_FORMAT", "json"),
        )
# ---------------------------------------------------------------------------
# Agent Response
# ---------------------------------------------------------------------------
@dataclass
class AgentResponse:
"""Response from the agent runtime."""
text: str
session_id: str | None = None
model: str | None = None
provider: str | None = None
duration_ms: int = 0
usage: dict | None = None
error: str | None = None
@property
def ok(self) -> bool:
return self.error is None and bool(self.text.strip())
# ---------------------------------------------------------------------------
# Session Store
# ---------------------------------------------------------------------------
class SessionStore:
"""
Maps external IDs (e.g., Slack thread_ts) to OpenCode session IDs.
Mirrors OpenClaw's session isolation: each channel thread gets its own session.
"""
def __init__(self):
self._sessions: dict[str, dict] = {}
self._lock = threading.Lock()
def get(self, external_id: str) -> str | None:
"""Get the OpenCode session ID for an external conversation ID."""
with self._lock:
entry = self._sessions.get(external_id)
if entry:
entry["last_used"] = time.time()
return entry["session_id"]
return None
def set(self, external_id: str, session_id: str) -> None:
"""Map an external ID to an OpenCode session ID."""
with self._lock:
self._sessions[external_id] = {
"session_id": session_id,
"created": time.time(),
"last_used": time.time(),
}
def remove(self, external_id: str) -> None:
"""Remove -a session mapping."""
with self._lock:
self._sessions.pop(external_id, None)
def cleanup(self, ttl_hours: int = 24) -> int:
"""Remove stale sessions older than ttl_hours. Returns count removed."""
cutoff = time.time() - (ttl_hours * 3600)
removed = 0
with self._lock:
stale = [
k
for k, v in self._sessions.items()
if v["last_used"] < cutoff
]
for k in stale:
del self._sessions[k]
removed += 1
return removed
@property
def count(self) -> int:
with self._lock:
return len(self._sessions)
# ---------------------------------------------------------------------------
# OpenCode Runtime
# ---------------------------------------------------------------------------
class OpenCodeRuntime:
    """
    OpenCode Agent Runtime — the AI brain for Aetheel.
    Inspired by OpenClaw's `runCliAgent()` in cli-runner.ts:
    - Resolves the CLI backend config
    - Builds CLI args (model, session, system prompt, etc.)
    - Runs the command with a timeout
    - Parses the JSON or text output
    - Returns structured results
    We adapt this for OpenCode's two modes:
    - SDK mode: uses the opencode-ai Python SDK to talk to `opencode serve`
    - CLI mode: spawns `opencode run` subprocess (like OpenClaw's approach)

    SDK mode degrades gracefully: if the SDK is missing or the server is
    unreachable, requests are routed through CLI mode instead.
    """

    def __init__(self, config: OpenCodeConfig | None = None) -> None:
        """Create the runtime, validate the install, and (in SDK mode) connect."""
        self._config = config or OpenCodeConfig.from_env()
        self._sessions = SessionStore()
        # SDK client instance (opencode_ai.Opencode) once connected; None otherwise.
        self._sdk_client = None
        # Set True only after a successful connectivity probe in _init_sdk_client().
        self._sdk_available = False
        # Validate OpenCode is available
        self._validate_installation()
        # Try to initialize SDK client if in SDK mode
        if self._config.mode == RuntimeMode.SDK:
            self._init_sdk_client()
        logger.info(
            f"OpenCode runtime initialized "
            f"(mode={self._config.mode.value}, "
            f"model={self._config.model or 'default'})"
        )

    # -------------------------------------------------------------------
    # Public API
    # -------------------------------------------------------------------
    def chat(
        self,
        message: str,
        conversation_id: str | None = None,
        system_prompt: str | None = None,
    ) -> AgentResponse:
        """
        Send a message to the AI agent and get a response.
        This is the main entry point, used by the Slack adapter's message handler.
        Args:
            message: The user's message text
            conversation_id: External conversation ID (e.g., Slack thread_ts)
                for session isolation
            system_prompt: Optional per-request system prompt override
        Returns:
            AgentResponse with the AI's reply. Never raises: all failures
            (including unexpected exceptions from either mode) are folded
            into AgentResponse.error.
        """
        started = time.time()
        if not message.strip():
            return AgentResponse(
                text="", error="Empty message", duration_ms=0
            )
        try:
            # Route to the appropriate mode. SDK mode is used only when the
            # connectivity probe succeeded; otherwise fall through to CLI.
            if self._config.mode == RuntimeMode.SDK and self._sdk_available:
                result = self._chat_sdk(message, conversation_id, system_prompt)
            else:
                result = self._chat_cli(message, conversation_id, system_prompt)
            result.duration_ms = int((time.time() - started) * 1000)
            return result
        except Exception as e:
            duration_ms = int((time.time() - started) * 1000)
            logger.error(f"Agent error: {e}", exc_info=True)
            return AgentResponse(
                text="",
                error=str(e),
                duration_ms=duration_ms,
            )

    def get_status(self) -> dict:
        """Get the runtime status (for the /status command)."""
        status = {
            "mode": self._config.mode.value,
            "model": self._config.model or "default",
            "provider": self._config.provider or "auto",
            "active_sessions": self._sessions.count,
            # NOTE: spawns `opencode --version` (up to 5s) on every call.
            "opencode_available": self._is_opencode_available(),
        }
        if self._config.mode == RuntimeMode.SDK:
            status["server_url"] = self._config.server_url
            status["sdk_connected"] = self._sdk_available
        return status

    def cleanup_sessions(self) -> int:
        """Clean up stale sessions. Returns count removed."""
        return self._sessions.cleanup(self._config.session_ttl_hours)

    # -------------------------------------------------------------------
    # CLI Mode: Subprocess execution
    # (mirrors OpenClaw's runCliAgent → runCommandWithTimeout pattern)
    # -------------------------------------------------------------------
    def _chat_cli(
        self,
        message: str,
        conversation_id: str | None = None,
        system_prompt: str | None = None,
    ) -> AgentResponse:
        """
        Run OpenCode in CLI mode via `opencode run`.
        This mirrors OpenClaw's cli-runner.ts:
        1. Build the CLI args (like buildCliArgs)
        2. Run the command with a timeout
        3. Parse the output (like parseCliJson)
        4. Return structured results

        FileNotFoundError/OSError from a missing binary is not caught here;
        it propagates to chat()'s generic handler and becomes an error response.
        """
        # Build CLI args — modeled after OpenClaw's buildCliArgs()
        args = self._build_cli_args(message, conversation_id, system_prompt)
        logger.info(
            f"CLI exec: {self._config.command} run "
            f"(prompt_chars={len(message)}, "
            f"session={conversation_id or 'new'})"
        )
        try:
            # Run the command — mirrors OpenClaw's runCommandWithTimeout()
            result = subprocess.run(
                args,
                capture_output=True,
                text=True,
                timeout=self._config.timeout_seconds,
                cwd=self._config.workspace_dir or os.getcwd(),
                env=self._build_cli_env(),
            )
            stdout = result.stdout.strip()
            stderr = result.stderr.strip()
            if result.returncode != 0:
                # Mirror OpenClaw's error classification
                error_text = stderr or stdout or "CLI command failed"
                logger.error(
                    f"CLI failed (code={result.returncode}): {error_text[:200]}"
                )
                return AgentResponse(
                    text="",
                    error=f"OpenCode CLI error: {error_text[:500]}",
                )
            # Parse the output — mirrors OpenClaw's parseCliJson/parseCliJsonl
            response_text = self._parse_cli_output(stdout)
            if not response_text:
                response_text = stdout  # fallback to raw output
            # Extract session ID if returned, and remember it so follow-up
            # messages in the same thread continue the same OpenCode session.
            session_id = self._extract_session_id(stdout)
            if session_id and conversation_id:
                self._sessions.set(conversation_id, session_id)
            return AgentResponse(
                text=response_text,
                session_id=session_id,
                model=self._config.model,
            )
        except subprocess.TimeoutExpired:
            logger.error(
                f"CLI timeout after {self._config.timeout_seconds}s"
            )
            return AgentResponse(
                text="",
                error=f"Request timed out after {self._config.timeout_seconds}s",
            )

    def _build_cli_args(
        self,
        message: str,
        conversation_id: str | None = None,
        system_prompt: str | None = None,
    ) -> list[str]:
        """
        Build CLI arguments for `opencode run`.
        Modeled after OpenClaw's buildCliArgs() in cli-runner/helpers.ts:
        - base args (command + run)
        - model arg (--model)
        - session arg (--session / --continue)
        - system prompt (prepended to message as XML block)
        - format arg (--format)
        - the prompt itself
        """
        args = [self._config.command, "run"]
        # Model selection
        if self._config.model:
            args.extend(["--model", self._config.model])
        # Session continuity — like OpenClaw's sessionArg
        existing_session = None
        if conversation_id:
            existing_session = self._sessions.get(conversation_id)
        if existing_session:
            # Continue an existing session
            # NOTE(review): assumes `opencode run` accepts --continue together
            # with --session <id> — confirm against the current CLI reference.
            args.extend(["--continue", "--session", existing_session])
        # For new conversations, OpenCode creates a new session automatically
        # Output format — use JSON for structured parsing, default for plain text
        # Valid choices: "default" (formatted), "json" (raw JSON events)
        if self._config.format and self._config.format in ("default", "json"):
            args.extend(["--format", self._config.format])
        # Build the full prompt — prepend system prompt if provided
        # opencode run doesn't have a --system-prompt flag, so we inject it
        # as an XML-tagged block before the user message
        if system_prompt:
            full_message = (
                f"<system_instructions>\n{system_prompt}\n</system_instructions>\n\n"
                f"<user_message>\n{message}\n</user_message>"
            )
        else:
            full_message = message
        # The prompt message (must come last as a positional arg)
        args.append(full_message)
        return args

    def _build_cli_env(self) -> dict[str, str]:
        """
        Build environment variables for the CLI subprocess.
        Note: OpenCode reads OPENCODE_* env vars as config overrides and
        tries to parse their values as JSON. We must NOT set arbitrary
        OPENCODE_* vars here — only pass through the parent environment.
        """
        env = os.environ.copy()
        return env

    def _parse_cli_output(self, stdout: str) -> str:
        """
        Parse CLI output to extract the response text.
        OpenCode's `--format json` emits JSONL (one JSON object per line):
            {"type":"step_start", "sessionID":"ses_...", "part":{...}}
            {"type":"text", "sessionID":"ses_...", "part":{"type":"text","text":"Hello!"}}
            {"type":"step_finish","sessionID":"ses_...", "part":{"type":"step-finish",...}}
        We extract text from events where type == "text" and part.text exists.
        Non-JSON lines (e.g. `--format default` output) are kept verbatim.
        """
        if not stdout.strip():
            return ""
        # Parse JSONL lines — collect text from "text" type events
        lines = stdout.strip().split("\n")
        texts = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            try:
                event = json.loads(line)
                if not isinstance(event, dict):
                    continue
                # OpenCode event format: extract text from part.text
                event_type = event.get("type", "")
                part = event.get("part", {})
                if event_type == "text" and isinstance(part, dict):
                    text = part.get("text", "")
                    if text:
                        texts.append(text)
                    continue
                # Fallback: try generic text extraction (for non-OpenCode formats)
                text = self._collect_text(event)
                if text:
                    texts.append(text)
            except json.JSONDecodeError:
                # Not JSON — might be plain text output (--format default)
                texts.append(line)
        if texts:
            return "\n".join(texts)
        # Final fallback to raw text
        return stdout.strip()

    def _collect_text(self, value: Any) -> str:
        """
        Recursively collect text from a parsed JSON object.
        Adapted from OpenClaw's collectText() from helpers.ts,
        with awareness of OpenCode's event structure.
        Returns "" when no recognizable text field is found.
        """
        if not value:
            return ""
        if isinstance(value, str):
            return value
        if isinstance(value, list):
            return "".join(self._collect_text(item) for item in value)
        if isinstance(value, dict):
            # Skip OpenCode event wrapper — dig into "part" first
            if "part" in value and isinstance(value["part"], dict):
                part = value["part"]
                if "text" in part and isinstance(part["text"], str):
                    return part["text"]
            # Try common text fields
            if "content" in value and isinstance(value["content"], str):
                return value["content"]
            if "content" in value and isinstance(value["content"], list):
                return "".join(
                    self._collect_text(item) for item in value["content"]
                )
            if "message" in value and isinstance(value["message"], dict):
                return self._collect_text(value["message"])
            if "result" in value:
                return self._collect_text(value["result"])
        return ""

    def _extract_session_id(self, stdout: str) -> str | None:
        """
        Extract session ID from CLI output.
        OpenCode includes sessionID in every JSONL event line:
            {"type":"text", "sessionID":"ses_abc123", ...}
        We grab it from the first event that has one.
        """
        lines = stdout.strip().split("\n")
        for line in lines:
            line = line.strip()
            if not line:
                continue
            try:
                event = json.loads(line)
                if not isinstance(event, dict):
                    continue
                # OpenCode format: top-level sessionID
                session_id = event.get("sessionID")
                if isinstance(session_id, str) and session_id.strip():
                    return session_id.strip()
                # Fallback: check nested part.sessionID
                part = event.get("part", {})
                if isinstance(part, dict):
                    session_id = part.get("sessionID")
                    if isinstance(session_id, str) and session_id.strip():
                        return session_id.strip()
            except json.JSONDecodeError:
                continue
        return None

    # -------------------------------------------------------------------
    # SDK Mode: OpenCode serve API
    # (enhanced version of CLI mode, using the official Python SDK)
    # -------------------------------------------------------------------
    def _init_sdk_client(self) -> None:
        """Initialize the OpenCode Python SDK client.

        Leaves _sdk_available False (triggering CLI fallback) when the SDK
        is not installed or the connectivity probe fails.
        """
        try:
            from opencode_ai import Opencode
            kwargs: dict[str, Any] = {
                "base_url": self._config.server_url,
            }
            if self._config.server_password:
                import httpx
                # Server auth is HTTP basic: username + password.
                kwargs["http_client"] = httpx.Client(
                    auth=(
                        self._config.server_username,
                        self._config.server_password,
                    )
                )
            self._sdk_client = Opencode(**kwargs)
            # Test connectivity
            try:
                self._sdk_client.app.get()
                self._sdk_available = True
                logger.info(
                    f"SDK connected to {self._config.server_url}"
                )
            except Exception as e:
                logger.warning(
                    f"SDK connection test failed: {e}. "
                    f"Will fall back to CLI mode."
                )
                self._sdk_available = False
        except ImportError:
            logger.warning(
                "opencode-ai SDK not installed. "
                "Install with: pip install opencode-ai. "
                "Falling back to CLI mode."
            )
            self._sdk_available = False

    def _chat_sdk(
        self,
        message: str,
        conversation_id: str | None = None,
        system_prompt: str | None = None,
    ) -> AgentResponse:
        """
        Chat using the OpenCode Python SDK.
        Uses the server API:
        1. Create or reuse a session (POST /session)
        2. Send a message (POST /session/:id/message → client.session.chat)
        3. Parse the AssistantMessage response
        Any SDK failure falls back to CLI mode rather than raising.
        """
        if not self._sdk_client:
            return self._chat_cli(message, conversation_id, system_prompt)
        try:
            # Resolve or create session
            session_id = None
            if conversation_id:
                session_id = self._sessions.get(conversation_id)
            if not session_id:
                # Create a new session
                session = self._sdk_client.session.create()
                session_id = session.id
                if conversation_id:
                    self._sessions.set(conversation_id, session_id)
                logger.info(f"SDK: created session {session_id}")
            # Build message parts
            parts = [{"type": "text", "text": message}]
            # Build chat params
            chat_kwargs: dict[str, Any] = {"parts": parts}
            if self._config.model:
                chat_kwargs["model"] = self._config.model
            if system_prompt:
                chat_kwargs["system"] = system_prompt
            # Send message and get response
            logger.info(
                f"SDK chat: session={session_id[:8]}... "
                f"prompt_chars={len(message)}"
            )
            response = self._sdk_client.session.chat(
                session_id, **chat_kwargs
            )
            # Extract text from the AssistantMessage response
            response_text = self._extract_sdk_response_text(response)
            return AgentResponse(
                text=response_text,
                session_id=session_id,
                model=self._config.model,
            )
        except Exception as e:
            logger.warning(
                f"SDK chat failed: {e}. Falling back to CLI mode."
            )
            # Graceful fallback to CLI mode
            return self._chat_cli(message, conversation_id, system_prompt)

    def _extract_sdk_response_text(self, response: Any) -> str:
        """Extract text content from the SDK's AssistantMessage response."""
        # The response is an AssistantMessage which has parts
        if hasattr(response, "parts"):
            texts = []
            for part in response.parts:
                if hasattr(part, "text"):
                    texts.append(part.text)
                elif hasattr(part, "content"):
                    texts.append(str(part.content))
            return "\n".join(texts).strip()
        # Fallback: try to get text directly
        if hasattr(response, "text"):
            return response.text.strip()
        # Last resort: stringify
        return str(response).strip()

    # -------------------------------------------------------------------
    # Validation & Utilities
    # -------------------------------------------------------------------
    def _validate_installation(self) -> None:
        """Check that OpenCode CLI is installed and accessible.

        Never raises — only logs warnings and, when possible, repairs
        self._config.command by re-resolving the binary path.
        """
        cmd = self._config.command
        # If the resolved command doesn't exist, try resolving again
        if not os.path.isfile(cmd) and not shutil.which(cmd):
            resolved = _resolve_opencode_command()
            if resolved != "opencode" and os.path.isfile(resolved):
                self._config.command = resolved
                logger.info(f"Resolved opencode binary: {resolved}")
            else:
                logger.warning(
                    f"'{cmd}' not found. "
                    f"Install with: curl -fsSL https://opencode.ai/install | bash "
                    f"or: npm install -g opencode-ai"
                )
                if self._config.mode == RuntimeMode.CLI:
                    logger.warning(
                        "CLI mode requires opencode to be installed. "
                        "If using SDK mode, set OPENCODE_MODE=sdk."
                    )

    def _is_opencode_available(self) -> bool:
        """Check if OpenCode CLI is available (runs `opencode --version`)."""
        try:
            result = subprocess.run(
                [self._config.command, "--version"],
                capture_output=True,
                text=True,
                timeout=5,
            )
            return result.returncode == 0
        except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
            return False
# ---------------------------------------------------------------------------
# System Prompt Builder
# (Mirrors OpenClaw's buildSystemPrompt in cli-runner/helpers.ts)
# ---------------------------------------------------------------------------
def build_aetheel_system_prompt(
user_name: str | None = None,
channel_name: str | None = None,
is_dm: bool = False,
extra_context: str | None = None,
) -> str:
"""
Build the system prompt for Aetheel.
Like OpenClaw's buildAgentSystemPrompt(), this constructs a comprehensive
prompt that gives the AI its identity, capabilities, and context.
"""
lines = [
"You are Aetheel — a personal AI assistant that lives inside Slack.",
"",
"# Identity",
"- Your name is Aetheel",
"- You ARE a Slack bot — you are already running inside Slack right now",
"- You have your own Slack bot token and can send messages to any channel",
"- You have a persistent memory system with identity files (SOUL.md, USER.md, MEMORY.md)",
"- You can read and update your memory files across sessions",
"",
"# Your Capabilities",
"- **Direct messaging**: You are already in the user's Slack workspace — no setup needed",
"- **Memory**: You have SOUL.md (your personality), USER.md (user profile), MEMORY.md (long-term memory)",
"- **Session logs**: Conversations are automatically saved to daily/ session files",
"- **Reminders**: You can schedule messages to be sent later using action tags (see below)",
"",
"# Action Tags",
"You can perform actions by including special tags in your response.",
"The system will parse these tags and execute the actions automatically.",
"",
"## Reminders",
"To schedule a reminder, include this tag anywhere in your response:",
"```",
"[ACTION:remind|<minutes>|<message>]",
"```",
"Example: `[ACTION:remind|2|Time to drink water! 💧]` — sends a Slack message in 2 minutes",
"Example: `[ACTION:remind|30|Stand up and stretch! 🧘]` — sends a message in 30 minutes",
"",
"When scheduling a reminder, confirm to the user that it's been set,",
"and include the action tag in your response (it will be hidden from the user).",
"",
"# Guidelines",
"- Be helpful, concise, and friendly",
"- Use Slack formatting (bold with *text*, code with `text`, etc.)",
"- Keep responses focused and relevant",
"- If you don't know something, say so honestly",
"- Avoid extremely long responses unless asked for detail",
"- NEVER ask for Slack tokens, webhook URLs, or API keys — you already have them",
"- NEVER suggest the user 'set up' Slack — you ARE the Slack bot",
"",
"# Context",
]
if user_name:
lines.append(f"- You are chatting with: {user_name}")
if channel_name and not is_dm:
lines.append(f"- Channel: #{channel_name}")
if is_dm:
lines.append("- This is a direct message (private conversation)")
if extra_context:
lines.append("")
lines.append(extra_context)
return "\n".join(lines)

371
docs/memory-system.md Normal file
View File

@@ -0,0 +1,371 @@
# Aetheel Memory System
> **Date:** 2026-02-13
> **Inspired by:** OpenClaw's `src/memory/` (49 files, 2,300+ LOC manager)
> **Implementation:** ~600 lines of Python across 6 modules
---
## Table of Contents
1. [Overview](#overview)
2. [Architecture](#architecture)
3. [File Structure](#file-structure)
4. [Identity Files](#identity-files)
5. [How It Works](#how-it-works)
6. [Configuration](#configuration)
7. [API Reference](#api-reference)
8. [Dependencies](#dependencies)
9. [Testing](#testing)
10. [OpenClaw Mapping](#openclaw-mapping)
---
## 1. Overview
The memory system gives Aetheel **persistent, searchable memory** using a combination of markdown files and SQLite. It follows the same design as OpenClaw's memory architecture:
- **Markdown IS the database** — identity files (`SOUL.md`, `USER.md`, `MEMORY.md`) are human-readable and editable in any text editor or Obsidian
- **Hybrid search** — combines vector similarity (cosine, 0.7 weight) with BM25 keyword search (0.3 weight) for accurate retrieval
- **Fully local** — uses fastembed ONNX embeddings (384-dim), zero API calls
- **Incremental sync** — only re-indexes files that have changed (SHA-256 hash comparison)
- **Session logging** — conversation transcripts stored in `daily/` and indexed for search
---
## 2. Architecture
```
┌──────────────────────────┐
│ MemoryManager │
│ (memory/manager.py) │
├──────────────────────────┤
│ • sync() │
│ • search() │
│ • log_session() │
│ • read/update identity │
│ • file watching │
└────────┬─────────────────┘
┌───────────────┼───────────────┐
▼ ▼ ▼
┌──────────────┐ ┌─────────────┐ ┌──────────────┐
│ Workspace │ │ SQLite │ │ fastembed │
│ (.md files)│ │ Database │ │ (ONNX) │
├──────────────┤ ├─────────────┤ ├──────────────┤
│ SOUL.md │ │ files │ │ bge-small │
│ USER.md │ │ chunks │ │ 384-dim │
│ MEMORY.md │ │ chunks_fts │ │ L2-normalized│
│ memory/ │ │ emb_cache │ │ local only │
│ daily/ │ │ session_logs│ │ │
└──────────────┘ └─────────────┘ └──────────────┘
```
### Search Flow
```
Query: "what are my preferences?"
┌──────────────────┐ ┌──────────────────┐
│ Vector Search │ │ Keyword Search │
│ (cosine sim) │ │ (FTS5 / BM25) │
│ weight: 0.7 │ │ weight: 0.3 │
└────────┬─────────┘ └────────┬─────────┘
│ │
└──────────┬─────────────┘
┌───────────────┐
│ Hybrid Merge │
│ dedupe by ID │
│ sort by score│
└───────┬───────┘
Top-N results with
score ≥ min_score
```
---
## 3. File Structure
### Source Code
```
memory/
├── __init__.py # Package exports (MemoryManager, MemorySearchResult, MemorySource)
├── types.py # Data classes: MemoryConfig, MemorySearchResult, MemoryChunk, etc.
├── internal.py # Utilities: hashing, chunking, file discovery, cosine similarity
├── hybrid.py # Hybrid search merging (0.7 vector + 0.3 BM25)
├── schema.py # SQLite schema (files, chunks, FTS5, embedding cache)
├── embeddings.py # Local fastembed ONNX embeddings (384-dim)
└── manager.py # Main MemoryManager orchestrator (~400 LOC)
```
### Workspace (Created Automatically)
```
~/.aetheel/workspace/
├── SOUL.md # Personality & values — "who you are"
├── USER.md # User profile — "who I am"
├── MEMORY.md # Long-term memory — decisions, lessons, context
├── memory/ # Additional markdown memory files (optional)
│ └── *.md
└── daily/ # Session logs by date
├── 2026-02-13.md
├── 2026-02-14.md
└── ...
```
---
## 4. Identity Files
Inspired by OpenClaw's template system (`docs/reference/templates/SOUL.md`).
### SOUL.md — Who You Are
The agent's personality, values, and behavioral guidelines. Created with sensible defaults:
- Core truths (be helpful, have opinions, be resourceful)
- Boundaries (privacy, external actions)
- Continuity rules (files ARE the memory)
### USER.md — Who I Am
The user's profile — name, role, timezone, preferences, current focus, tools. Fill this in to personalize the agent.
### MEMORY.md — Long-Term Memory
Persistent decisions, lessons learned, and context that carries across sessions. The agent appends entries with timestamps:
```markdown
### [2026-02-13 12:48]
Learned that the user prefers concise responses with code examples.
```
---
## 5. How It Works
### Sync (`await manager.sync()`)
1. **Discover files** — scans `SOUL.md`, `USER.md`, `MEMORY.md`, `memory/*.md`
2. **Check hashes** — compares SHA-256 content hash against stored hash in `files` table
3. **Skip unchanged** — files with matching hashes are skipped (incremental sync)
4. **Chunk** — splits changed files into overlapping text chunks (~512 tokens, 50 token overlap)
5. **Embed** — generates 384-dim vectors via fastembed (checks embedding cache first)
6. **Store** — inserts chunks + embeddings into SQLite, updates FTS5 index
7. **Clean** — removes stale entries for deleted files
8. **Sessions** — repeats for `daily/*.md` session log files
### Search (`await manager.search("query")`)
1. **Auto-sync** — triggers sync if workspace is dirty (configurable)
2. **Keyword search** — runs FTS5 `MATCH` query with BM25 ranking
3. **Vector search** — embeds query, computes cosine similarity against all chunk embeddings
4. **Hybrid merge** — combines results: `score = 0.7 × vector + 0.3 × keyword`
5. **Deduplicate** — merges chunks found by both methods (by chunk ID)
6. **Filter & rank** — removes results below `min_score`, returns top-N sorted by score
### Session Logging (`manager.log_session(content)`)
1. Creates/appends to `daily/YYYY-MM-DD.md`
2. Adds timestamped entry with channel label
3. Marks index as dirty for next sync
---
## 6. Configuration
```python
from memory.types import MemoryConfig
config = MemoryConfig(
# Workspace directory containing identity files
workspace_dir="~/.aetheel/workspace",
# SQLite database path
db_path="~/.aetheel/memory.db",
# Chunking parameters
chunk_tokens=512, # ~2048 characters per chunk
chunk_overlap=50, # ~200 character overlap between chunks
# Search parameters
max_results=10, # maximum results per search
min_score=0.1, # minimum hybrid score threshold
vector_weight=0.7, # weight for vector similarity
text_weight=0.3, # weight for BM25 keyword score
# Embedding model (local ONNX)
embedding_model="BAAI/bge-small-en-v1.5",
embedding_dims=384,
# Sync behavior
watch=True, # enable file watching via watchdog
watch_debounce_ms=2000, # debounce file change events
sync_on_search=True, # auto-sync before search if dirty
# Session logs directory (defaults to workspace_dir/daily/)
sessions_dir=None,
# Sources to index
sources=["memory", "sessions"],
)
```
---
## 7. API Reference
### `MemoryManager`
```python
from memory import MemoryManager
from memory.types import MemoryConfig
# Create with custom config (or defaults)
mgr = MemoryManager(config=MemoryConfig(...))
# Sync workspace → index
stats = await mgr.sync(force=False)
# Returns: {"files_found": 4, "files_indexed": 4, "chunks_created": 5, ...}
# Hybrid search
results = await mgr.search("what are my preferences?", max_results=5, min_score=0.1)
# Returns: list[MemorySearchResult]
# .path — relative file path (e.g., "USER.md")
# .start_line — chunk start line
# .end_line — chunk end line
# .score — hybrid score (0.0 - 1.0)
# .snippet — text snippet (max 700 chars)
# .source — MemorySource.MEMORY or MemorySource.SESSIONS
# Identity files
soul = mgr.read_soul() # Read SOUL.md
user = mgr.read_user() # Read USER.md
memory = mgr.read_long_term_memory() # Read MEMORY.md
mgr.append_to_memory("learned X") # Append timestamped entry to MEMORY.md
mgr.update_identity_file("USER.md", new_content) # Overwrite a file
# Session logging
path = mgr.log_session("User: hi\nAssistant: hello", channel="slack")
# File reading
data = mgr.read_file("SOUL.md", from_line=1, num_lines=10)
# Status
status = mgr.status()
# Returns: {"files": 5, "chunks": 5, "cached_embeddings": 4, ...}
# File watching
mgr.start_watching() # auto-mark dirty on workspace changes
mgr.stop_watching()
# Cleanup
mgr.close()
```
### `MemorySearchResult`
```python
@dataclass
class MemorySearchResult:
path: str # Relative path to the markdown file
start_line: int # First line of the matching chunk
end_line: int # Last line of the matching chunk
score: float # Hybrid score (0.0 - 1.0)
snippet: str # Text snippet (max 700 characters)
source: MemorySource # "memory" or "sessions"
citation: str | None = None
```
---
## 8. Dependencies
| Package | Version | Purpose |
|---------|---------|---------|
| `fastembed` | 0.7.4 | Local ONNX embeddings (BAAI/bge-small-en-v1.5, 384-dim) |
| `watchdog` | 6.0.0 | File system watching for auto re-indexing |
| `sqlite3` | (stdlib) | Database engine with FTS5 full-text search |
Added to `pyproject.toml`:
```toml
dependencies = [
"fastembed>=0.7.4",
"watchdog>=6.0.0",
# ... existing deps
]
```
---
## 9. Testing
Run the smoke test:
```bash
uv run python test_memory.py
```
### Test Results (2026-02-13)
| Test | Result |
|------|--------|
| `hash_text()` | ✅ SHA-256 produces 64-char hex string |
| `chunk_markdown()` | ✅ Splits text into overlapping chunks with correct line numbers |
| Identity file creation | ✅ SOUL.md (793 chars), USER.md (417 chars), MEMORY.md (324 chars) |
| Append to MEMORY.md | ✅ Content grows with timestamped entry |
| Session logging | ✅ Creates `daily/2026-02-13.md` with channel + timestamp |
| Sync (first run) | ✅ 4 files found, 4 indexed, 5 chunks, 1 session |
| Search "personality values" | ✅ 5 results — top: SOUL.md (score 0.595) |
| Search "preferences" | ✅ 5 results — top: USER.md (score 0.583) |
| FTS5 keyword search | ✅ Available |
| Embedding cache | ✅ 4 entries cached (skip re-computation on next sync) |
| Status report | ✅ All fields populated correctly |
---
## 10. OpenClaw Mapping
How our Python implementation maps to OpenClaw's TypeScript source:
| OpenClaw File | Aetheel File | Description |
|---------------|-------------|-------------|
| `src/memory/types.ts` | `memory/types.py` | Core types (MemorySearchResult, MemorySource, etc.) |
| `src/memory/internal.ts` | `memory/internal.py` | hashText, chunkMarkdown, listMemoryFiles, cosineSimilarity |
| `src/memory/hybrid.ts` | `memory/hybrid.py` | buildFtsQuery, bm25RankToScore, mergeHybridResults |
| `src/memory/memory-schema.ts` | `memory/schema.py` | ensureMemoryIndexSchema → ensure_schema |
| `src/memory/embeddings.ts` | `memory/embeddings.py` | createEmbeddingProvider → embed_query/embed_batch (fastembed) |
| `src/memory/manager.ts` (2,300 LOC) | `memory/manager.py` (~400 LOC) | MemoryIndexManager → MemoryManager |
| `src/memory/sync-memory-files.ts` | Inlined in `manager.py` | syncMemoryFiles → _run_sync |
| `src/memory/session-files.ts` | Inlined in `manager.py` | buildSessionEntry → _sync_session_files |
| `docs/reference/templates/SOUL.md` | Auto-created by manager | Default identity file templates |
### Key Simplifications vs. OpenClaw
| Feature | OpenClaw | Aetheel |
|---------|----------|---------|
| **Embedding providers** | OpenAI, Voyage, Gemini, local ONNX (4 providers) | fastembed only (local ONNX, zero API calls) |
| **Vector storage** | sqlite-vec extension (C library) | JSON-serialized in chunks table (pure Python) |
| **File watching** | chokidar (Node.js) | watchdog (Python) |
| **Batch embedding** | OpenAI/Voyage batch APIs, concurrency pools | fastembed batch (single-threaded, local) |
| **Config system** | JSON5 + TypeBox + Zod schemas (100k+ LOC) | Simple Python dataclass |
| **Codebase** | 49 files, 2,300+ LOC manager alone | 6 files, ~600 LOC total |
### What We Kept
- ✅ Same identity file pattern (SOUL.md, USER.md, MEMORY.md)
- ✅ Same hybrid search algorithm (0.7 vector + 0.3 BM25)
- ✅ Same chunking approach (token-based with overlap)
- ✅ Same incremental sync (hash-based change detection)
- ✅ Same FTS5 full-text search with BM25 ranking
- ✅ Same embedding cache (avoids re-computing unchanged chunks)
- ✅ Same session log pattern (daily/ directory)
---
*This memory system is Phase 1 of the Aetheel build process as outlined in `openclaw-analysis.md`.*

View File

@@ -0,0 +1,232 @@
# OpenCode Runtime Integration — Summary
> Integration of OpenCode CLI as the agent runtime for Aetheel.
> Completed: 2026-02-13
---
## Overview
OpenCode CLI has been integrated as the AI "brain" for Aetheel, replacing the placeholder `smart_handler` with a full agent runtime. The architecture is directly inspired by OpenClaw's `cli-runner.ts` and `cli-backends.ts`, adapted for OpenCode's API and Python.
---
## Files Created & Modified
### New Files
| File | Purpose |
|------|---------|
| `agent/__init__.py` | Package init for the agent module |
| `agent/opencode_runtime.py` | Core runtime — ~750 lines covering both CLI and SDK modes |
| `docs/opencode-setup.md` | Comprehensive setup guide |
| `docs/opencode-integration-summary.md` | This summary document |
### Modified Files
| File | Change |
|------|--------|
| `main.py` | Rewired to use `ai_handler` backed by `OpenCodeRuntime` instead of placeholder `smart_handler` |
| `.env.example` | Added all OpenCode config variables |
| `requirements.txt` | Added optional `opencode-ai` SDK dependency note |
---
## Architecture
```
Slack Message → ai_handler() → OpenCodeRuntime.chat() → OpenCode → LLM → Response
```
### Two Runtime Modes
1. **CLI Mode** (default) — Spawns `opencode run` as a subprocess per request.
Direct port of OpenClaw's `runCliAgent()` → `runCommandWithTimeout()` pattern
from `cli-runner.ts`.
2. **SDK Mode** — Connects to `opencode serve` via the official Python SDK
(`opencode-ai`). Uses `client.session.create()``client.session.chat()`
for lower latency and better session management.
### Component Diagram
```
┌─────────────────────┐
│ Slack │
│ (messages) │
└──────┬──────────────┘
│ WebSocket
┌──────▼──────────────┐
│ Slack Adapter │
│ (slack_adapter.py) │
│ │
│ • Socket Mode │
│ • Event handling │
│ • Thread isolation │
└──────┬──────────────┘
│ ai_handler()
┌──────▼──────────────┐
│ OpenCode Runtime │
│ (opencode_runtime) │
│ │
│ • Session store │
│ • System prompt │
│ • Mode routing │
└──────┬──────────────┘
┌────┴────┐
│ │
▼ ▼
CLI Mode SDK Mode
┌──────────┐ ┌──────────────┐
│ opencode │ │ opencode │
│ run │ │ serve API │
│ (subproc)│ │ (HTTP/SDK) │
└──────────┘ └──────────────┘
│ │
└──────┬───────┘
┌──────▼──────┐
│ LLM │
│ (Anthropic, │
│ OpenAI, │
│ Gemini) │
└─────────────┘
```
---
## Key Components (OpenClaw → Aetheel Mapping)
| OpenClaw (`cli-runner.ts`) | Aetheel (`opencode_runtime.py`) |
|---|---|
| `CliBackendConfig` | `OpenCodeConfig` dataclass |
| `runCliAgent()` | `OpenCodeRuntime.chat()` |
| `buildCliArgs()` | `_build_cli_args()` |
| `runCommandWithTimeout()` | `subprocess.run(timeout=...)` |
| `parseCliJson()` / `collectText()` | `_parse_cli_output()` / `_collect_text()` |
| `pickSessionId()` | `_extract_session_id()` |
| `buildSystemPrompt()` | `build_aetheel_system_prompt()` |
| Session per thread | `SessionStore` (thread_ts → session_id) |
---
## Key Design Decisions
### 1. Dual-Mode Runtime (CLI + SDK)
- **CLI mode** is the default because it requires no persistent server — just `opencode` in PATH.
- **SDK mode** is preferred for production because it avoids cold-start latency and provides better session management.
- The runtime gracefully falls back from SDK → CLI if the server is unreachable or the SDK is not installed.
### 2. Session Isolation per Thread
- Each Slack thread (`thread_ts`) maps to a unique OpenCode session via the `SessionStore`.
- New threads get new sessions; replies within a thread reuse the same session.
- Stale sessions are cleaned up after `session_ttl_hours` (default 24h).
### 3. System Prompt Injection
- `build_aetheel_system_prompt()` constructs a per-message system prompt with the bot's identity, guidelines, and context (user name, channel, DM vs. mention).
- This mirrors OpenClaw's `buildAgentSystemPrompt()` from `cli-runner/helpers.ts`.
### 4. Output Parsing (from OpenClaw)
- The `_parse_cli_output()` method tries JSON → JSONL → raw text, matching OpenClaw's `parseCliJson()` and `parseCliJsonl()`.
- The `_collect_text()` method recursively traverses JSON objects to find text content, a direct port of OpenClaw's `collectText()`.
### 5. Built-in Commands Bypass AI
- Commands like `status`, `help`, `time`, and `sessions` are handled directly without calling the AI, for instant responses.
---
## Configuration Reference
All settings go in `.env`:
```env
# Runtime mode
OPENCODE_MODE=cli # "cli" or "sdk"
# Model (optional — uses OpenCode default if not set)
OPENCODE_MODEL=anthropic/claude-sonnet-4-20250514
# CLI mode settings
OPENCODE_COMMAND=opencode # path to the opencode binary
OPENCODE_TIMEOUT=120 # seconds before timeout
# SDK mode settings (only needed when OPENCODE_MODE=sdk)
OPENCODE_SERVER_URL=http://localhost:4096
OPENCODE_SERVER_PASSWORD= # optional HTTP basic auth
OPENCODE_SERVER_USERNAME=opencode # default username
# Workspace directory for OpenCode
OPENCODE_WORKSPACE=/path/to/project
# Output format
OPENCODE_FORMAT=text # "text" or "json"
```
CLI flags can override config:
```bash
python main.py --cli # force CLI mode
python main.py --sdk # force SDK mode
python main.py --model anthropic/claude-sonnet-4-20250514
python main.py --test # echo-only (no AI)
```
---
## OpenCode Research Summary
### OpenCode CLI
- **What:** Go-based AI coding agent for the terminal
- **Install:** `curl -fsSL https://opencode.ai/install | bash` or `npm install -g opencode-ai`
- **Key commands:**
- `opencode` — TUI mode
- `opencode run "prompt"` — non-interactive, returns output
- `opencode serve` — headless HTTP server (OpenAPI 3.1 spec)
- `opencode auth login` — configure LLM providers
- `opencode models` — list available models
- `opencode init` — generate `AGENTS.md` for a project
### OpenCode Server API (via `opencode serve`)
- Default: `http://localhost:4096`
- Auth: HTTP basic auth via `OPENCODE_SERVER_PASSWORD`
- Key endpoints:
- `GET /session` — list sessions
- `POST /session` — create session
- `POST /session/:id/message` — send message (returns `AssistantMessage`)
- `POST /session/:id/abort` — abort in-progress request
- `GET /event` — SSE event stream
### OpenCode Python SDK (`opencode-ai`)
- Install: `pip install opencode-ai`
- Key methods:
- `client.session.create()``Session`
- `client.session.chat(id, parts=[...])``AssistantMessage`
- `client.session.list()``Session[]`
- `client.session.abort(id)` → abort
- `client.app.get()` → app info
- `client.app.providers()` → available providers
---
## Quick Start
1. Install OpenCode: `curl -fsSL https://opencode.ai/install | bash`
2. Configure a provider: `opencode auth login`
3. Test standalone: `opencode run "Hello, what are you?"`
4. Configure `.env` (copy from `.env.example`)
5. Run Aetheel: `python main.py`
6. In Slack: send a message to the bot and get an AI response
---
## Next Steps
1. **Memory System** — Add conversation persistence (SQLite) so sessions survive restarts
2. **Heartbeat** — Proactive messages via cron/scheduler
3. **Skills** — Loadable skill modules (like OpenClaw's `skills/` directory)
4. **Multi-Channel** — Discord, Telegram adapters
5. **Streaming** — Use SSE events from `opencode serve` for real-time streaming responses

412
docs/opencode-setup.md Normal file
View File

@@ -0,0 +1,412 @@
# OpenCode Setup Guide
> Configure OpenCode CLI as the AI brain for Aetheel.
---
## Table of Contents
1. [Overview](#overview)
2. [Install OpenCode](#step-1-install-opencode)
3. [Configure a Provider](#step-2-configure-a-provider)
4. [Choose a Runtime Mode](#step-3-choose-a-runtime-mode)
5. [Configure Aetheel](#step-4-configure-aetheel)
6. [Test the Integration](#step-5-test-the-integration)
7. [Architecture](#architecture)
8. [Troubleshooting](#troubleshooting)
---
## Overview
Aetheel uses [OpenCode](https://opencode.ai) as its AI runtime — the "brain" that
generates responses to Slack messages. OpenCode is a terminal-native AI coding agent
that supports multiple LLM providers (Anthropic, OpenAI, Google, etc.).
### How It Works
```
Slack Message → Slack Adapter → OpenCode Runtime → LLM → Response → Slack Reply
```
Two runtime modes are available:
| Mode | Description | Best For |
|------|-------------|----------|
| **CLI** (default) | Runs `opencode run` as a subprocess per request | Simple setup, no persistent server |
| **SDK** | Talks to `opencode serve` via HTTP API | Lower latency, persistent sessions |
### Relationship to OpenClaw
This architecture is inspired by OpenClaw's `cli-runner.ts`:
- OpenClaw spawns CLI agents (Claude CLI, Codex CLI) as subprocesses
- Each CLI call gets: model args, session ID, system prompt, timeout
- Output is parsed from JSON/JSONL to extract the response text
- Sessions are mapped per-thread for conversation isolation
We replicate this pattern in Python, adapted for OpenCode's API.
---
## Step 1: Install OpenCode
### macOS / Linux (recommended)
```bash
curl -fsSL https://opencode.ai/install | bash
```
### npm (all platforms)
```bash
npm install -g opencode-ai
```
### Homebrew (macOS)
```bash
brew install anomalyco/tap/opencode
```
### Verify
```bash
opencode --version
```
---
## Step 2: Configure a Provider
OpenCode needs at least one LLM provider configured. Run:
```bash
opencode auth login
```
This will guide you through connecting to a provider. Options include:
| Provider | Auth Method |
|----------|-------------|
| **OpenCode Zen** | Token-based (opencode.ai account) |
| **Anthropic** | API key (`ANTHROPIC_API_KEY`) |
| **OpenAI** | API key (`OPENAI_API_KEY`) |
| **Google** | API key (`GEMINI_API_KEY`) |
### Using Environment Variables
Alternatively, set provider API keys in your `.env`:
```env
# Anthropic
ANTHROPIC_API_KEY=sk-ant-...
# OpenAI
OPENAI_API_KEY=sk-...
# Google Gemini
GEMINI_API_KEY=AI...
```
### Verify models are available
```bash
opencode models
```
---
## Step 3: Choose a Runtime Mode
### CLI Mode (Default — Recommended to Start)
CLI mode spawns `opencode run` for each message. No persistent server needed.
**Pros:**
- ✅ Simple — just install OpenCode and go
- ✅ No server to manage
- ✅ Isolated — each request is independent
**Cons:**
- ⚠️ Higher latency (cold start per request)
- ⚠️ Limited session continuity (uses `--continue` flag)
```env
OPENCODE_MODE=cli
```
### SDK Mode (Advanced — Lower Latency)
SDK mode talks to a running `opencode serve` instance via HTTP.
**Pros:**
- ✅ Lower latency (warm server, no cold start)
- ✅ Better session management
- ✅ Full API access
**Cons:**
- ⚠️ Requires running `opencode serve` separately
- ⚠️ Needs the `opencode-ai` Python package
```env
OPENCODE_MODE=sdk
```
#### Start the OpenCode server:
```bash
# Terminal 1: Start the headless server
opencode serve --port 4096
# Optional: with authentication
OPENCODE_SERVER_PASSWORD=my-secret opencode serve
```
#### Install the Python SDK:
```bash
pip install opencode-ai
```
---
## Step 4: Configure Aetheel
Edit your `.env` file:
```env
# --- Slack (see docs/slack-setup.md) ---
SLACK_BOT_TOKEN=xoxb-...
SLACK_APP_TOKEN=xapp-...
# --- OpenCode Runtime ---
OPENCODE_MODE=cli
# OPENCODE_MODEL=anthropic/claude-sonnet-4-20250514
OPENCODE_TIMEOUT=120
# --- SDK mode only ---
# OPENCODE_SERVER_URL=http://localhost:4096
# OPENCODE_SERVER_PASSWORD=
LOG_LEVEL=INFO
```
### Model Selection
You can specify a model explicitly, or let OpenCode use its default:
```env
# Anthropic Claude
OPENCODE_MODEL=anthropic/claude-sonnet-4-20250514
# OpenAI GPT-5
OPENCODE_MODEL=openai/gpt-5.1
# Google Gemini
OPENCODE_MODEL=google/gemini-3-pro
# OpenCode Zen (pay-as-you-go)
OPENCODE_MODEL=opencode/claude-opus-4-6
```
Or override at launch:
```bash
python main.py --model anthropic/claude-sonnet-4-20250514
```
---
## Step 5: Test the Integration
### 1. Verify OpenCode works standalone
```bash
# Quick test
opencode run "What is Python?"
# With a specific model
opencode run --model anthropic/claude-sonnet-4-20250514 "Hello"
```
### 2. Test the runtime directly
```bash
# Quick Python test
python -c "
from agent.opencode_runtime import OpenCodeRuntime
runtime = OpenCodeRuntime()
print(runtime.get_status())
response = runtime.chat('Hello, what are you?')
print(f'Response: {response.text[:200]}')
print(f'OK: {response.ok}, Duration: {response.duration_ms}ms')
"
```
### 3. Test via Slack
```bash
# Start in test mode first (echo only, no AI)
python main.py --test
# Then start with AI
python main.py
# Or force a specific mode
python main.py --cli
python main.py --sdk
```
### 4. In Slack
- Send `status` — see the runtime status
- Send `help` — see available commands
- Send any question — get an AI response
- Reply in a thread — conversation continues in context
---
## Architecture
### Component Diagram
```
┌─────────────────────┐
│ Slack │
│ (messages) │
└──────┬──────────────┘
│ WebSocket
┌──────▼──────────────┐
│ Slack Adapter │
│ (slack_adapter.py) │
│ │
│ • Socket Mode │
│ • Event handling │
│ • Thread isolation │
└──────┬──────────────┘
│ ai_handler()
┌──────▼──────────────┐
│ OpenCode Runtime │
│ (opencode_runtime) │
│ │
│ • Session store │
│ • System prompt │
│ • Mode routing │
└──────┬──────────────┘
┌────┴────┐
│ │
▼ ▼
CLI Mode SDK Mode
┌──────────┐ ┌──────────────┐
│ opencode │ │ opencode │
│ run │ │ serve API │
│ (subproc)│ │ (HTTP/SDK) │
└──────────┘ └──────────────┘
│ │
└──────┬───────┘
┌──────▼──────┐
│ LLM │
│ (Anthropic, │
│ OpenAI, │
│ Gemini) │
└─────────────┘
```
### How OpenClaw Inspired This
| OpenClaw Pattern | Aetheel Implementation |
|------------------|----------------------|
| `cli-runner.ts``runCliAgent()` | `opencode_runtime.py``OpenCodeRuntime.chat()` |
| `cli-backends.ts``CliBackendConfig` | `OpenCodeConfig` dataclass |
| `buildCliArgs()` | `_build_cli_args()` |
| `runCommandWithTimeout()` | `subprocess.run(timeout=...)` |
| `parseCliJson()` / `collectText()` | `_parse_cli_output()` / `_collect_text()` |
| `pickSessionId()` | `_extract_session_id()` |
| `buildSystemPrompt()` | `build_aetheel_system_prompt()` |
| Session per thread | `SessionStore` mapping conversation_id → session_id |
### File Map
| File | Purpose |
|------|---------|
| `agent/__init__.py` | Agent package init |
| `agent/opencode_runtime.py` | OpenCode runtime (CLI + SDK modes) |
| `adapters/slack_adapter.py` | Slack Socket Mode adapter |
| `main.py` | Entry point with AI handler |
| `docs/opencode-setup.md` | This setup guide |
| `docs/slack-setup.md` | Slack bot setup guide |
---
## Troubleshooting
### ❌ "opencode not found in PATH"
**Fix:** Install OpenCode:
```bash
curl -fsSL https://opencode.ai/install | bash
```
Then verify:
```bash
opencode --version
```
### ❌ "CLI command failed" or empty responses
**Check:**
1. Verify OpenCode works standalone: `opencode run "Hello"`
2. Check that a provider is configured: `opencode auth login`
3. Check that the model is available: `opencode models`
4. Check your API key is set (e.g., `ANTHROPIC_API_KEY`)
### ❌ "Request timed out"
**Fix:** Increase the timeout:
```env
OPENCODE_TIMEOUT=300
```
Or simplify your prompt — complex prompts take longer.
### ❌ SDK mode: "connection test failed"
**Fix:**
1. Make sure `opencode serve` is running: `opencode serve --port 4096`
2. Check the URL in `.env`: `OPENCODE_SERVER_URL=http://localhost:4096`
3. If using auth, set both `OPENCODE_SERVER_PASSWORD` in `.env` and when starting the server
### ❌ "opencode-ai SDK not installed"
**Fix:**
```bash
pip install opencode-ai
```
If you don't want to install the SDK, switch to CLI mode:
```env
OPENCODE_MODE=cli
```
### ❌ Responses are cut off or garbled
This usually means the output format parsing failed.
**Fix:** Try setting the format to text:
```env
OPENCODE_FORMAT=text
```
---
## Next Steps
1. **Memory System** — Add conversation persistence (SQLite)
2. **Heartbeat** — Proactive messages via cron/scheduler
3. **Skills** — Loadable skill modules (like OpenClaw's skills/)
4. **Multi-Channel** — Discord, Telegram adapters

363
docs/slack-setup.md Normal file
View File

@@ -0,0 +1,363 @@
# Slack Bot Setup Guide
> Complete guide to creating a Slack bot and connecting it to Aetheel.
---
## Table of Contents
1. [Overview](#overview)
2. [Create a Slack App](#step-1-create-a-slack-app)
3. [Configure Bot Permissions](#step-2-configure-bot-permissions)
4. [Enable Socket Mode](#step-3-enable-socket-mode)
5. [Enable Event Subscriptions](#step-4-enable-event-subscriptions)
6. [Install the App to Your Workspace](#step-5-install-the-app-to-your-workspace)
7. [Get Your Tokens](#step-6-get-your-tokens)
8. [Configure Aetheel](#step-7-configure-aetheel)
9. [Run and Test](#step-8-run-and-test)
10. [Troubleshooting](#troubleshooting)
11. [Architecture Reference](#architecture-reference)
---
## Overview
Aetheel connects to Slack using **Socket Mode**, which means:
- **No public URL needed** — works behind firewalls and NAT
- **No webhook setup** — Slack pushes events via WebSocket
- **Real-time** — instant message delivery
- **Secure** — encrypted WebSocket connection
This is the same approach used by [OpenClaw](https://github.com/openclaw/openclaw) (see `src/slack/monitor/provider.ts`), where they use `@slack/bolt` with `socketMode: true`.
### What You'll Need
| Item | Description |
|------|-------------|
| **Slack Workspace** | A Slack workspace where you have admin permissions |
| **Bot Token** | `xoxb-...` — for API calls (sending messages, reading info) |
| **App Token** | `xapp-...` — for Socket Mode connection |
| **Python 3.10+** | Runtime for the Aetheel service |
---
## Step 1: Create a Slack App
1. Go to [https://api.slack.com/apps](https://api.slack.com/apps)
2. Click **"Create New App"**
3. Choose **"From scratch"**
4. Fill in:
- **App Name:** `Aetheel` (or any name you prefer)
- **Workspace:** Select your workspace
5. Click **"Create App"**
You'll be taken to your app's **Basic Information** page.
---
## Step 2: Configure Bot Permissions
Navigate to **OAuth & Permissions** in the left sidebar.
Scroll down to **Scopes****Bot Token Scopes** and add the following:
### Required Scopes
| Scope | Purpose |
|-------|---------|
| `app_mentions:read` | Receive @mentions in channels |
| `channels:history` | Read messages in public channels |
| `channels:read` | View basic channel info |
| `chat:write` | Send messages |
| `groups:history` | Read messages in private channels |
| `groups:read` | View private channel info |
| `im:history` | Read direct messages |
| `im:read` | View DM info |
| `im:write` | Open DM conversations |
| `mpim:history` | Read group DMs |
| `mpim:read` | View group DM info |
| `users:read` | Look up user info (for display names) |
### Optional Scopes (for future features)
| Scope | Purpose |
|-------|---------|
| `files:read` | Read files shared in messages |
| `files:write` | Upload files |
| `reactions:read` | Read emoji reactions |
| `reactions:write` | Add emoji reactions |
> **Tip:** You can always add more scopes later, but you'll need to reinstall the app.
---
## Step 3: Enable Socket Mode
1. Navigate to **Socket Mode** in the left sidebar
2. Toggle **"Enable Socket Mode"** to **ON**
3. You'll be prompted to create an **App-Level Token**:
- **Token Name:** `aetheel-socket` (or any name)
- **Scopes:** Add `connections:write`
4. Click **"Generate"**
5. **⚠️ Copy the `xapp-...` token now!** You won't be able to see it again.
- Save it somewhere safe — you'll need it in Step 6.
---
## Step 4: Enable Event Subscriptions
1. Navigate to **Event Subscriptions** in the left sidebar
2. Toggle **"Enable Events"** to **ON**
3. Under **Subscribe to bot events**, add:
| Event | Description |
|-------|-------------|
| `message.channels` | Messages in public channels the bot is in |
| `message.groups` | Messages in private channels the bot is in |
| `message.im` | Direct messages to the bot |
| `message.mpim` | Group DMs that include the bot |
| `app_mention` | When someone @mentions the bot |
4. Click **"Save Changes"**
> **Note:** With Socket Mode enabled, you do NOT need a Request URL.
---
## Step 5: Install the App to Your Workspace
1. Navigate to **Install App** in the left sidebar
2. Click **"Install to Workspace"**
3. Review the permissions and click **"Allow"**
4. You'll see the **Bot User OAuth Token** (`xoxb-...`) — copy it!
> After installation, invite the bot to any channels where you want it to respond:
> - In Slack, go to the channel
> - Type `/invite @Aetheel` (or your bot's name)
---
## Step 6: Get Your Tokens
After completing the steps above, you should have two tokens:
| Token | Format | Where to Find |
|-------|--------|---------------|
| **Bot Token** | `xoxb-1234-5678-abc...` | **OAuth & Permissions** → Bot User OAuth Token |
| **App Token** | `xapp-1-A0123-456...` | **Basic Information** → App-Level Tokens (or from Step 3) |
---
## Step 7: Configure Aetheel
### Option A: Using `.env` file (recommended)
```bash
# Copy the example env file
cp .env.example .env
# Edit .env with your tokens
```
Edit `.env`:
```env
SLACK_BOT_TOKEN=xoxb-your-actual-bot-token
SLACK_APP_TOKEN=xapp-your-actual-app-token
LOG_LEVEL=INFO
```
### Option B: Export environment variables
```bash
export SLACK_BOT_TOKEN="xoxb-your-actual-bot-token"
export SLACK_APP_TOKEN="xapp-your-actual-app-token"
```
---
## Step 8: Run and Test
### Install dependencies
```bash
pip install -r requirements.txt
```
### Run the bot
```bash
# Start with smart handler (default)
python main.py
# Start in test/echo mode
python main.py --test
# Start with debug logging
python main.py --log DEBUG
```
### Test sending and receiving
```bash
# Run the test suite — sends test messages to a channel
python test_slack.py --channel C0123456789
# Or send a DM test
python test_slack.py --dm U0123456789
# Send-only (no listening)
python test_slack.py --channel C0123456789 --send-only
```
### Verify it's working
1. **In Slack**, go to a channel where the bot is invited
2. Type `@Aetheel help` — you should see the help response
3. Type `@Aetheel status` — you should see the bot's status
4. Send a DM to the bot — it should echo back with details
---
## Troubleshooting
### ❌ "Slack bot token is required"
**Problem:** `SLACK_BOT_TOKEN` is not set or empty.
**Fix:**
1. Check your `.env` file exists and contains the token
2. Make sure there are no extra spaces or quotes around the token
3. Verify the token starts with `xoxb-`
### ❌ "Slack app-level token is required for Socket Mode"
**Problem:** `SLACK_APP_TOKEN` is not set.
**Fix:**
1. Go to your Slack app → **Basic Information****App-Level Tokens**
2. If no token exists, generate one with `connections:write` scope
3. Add it to your `.env` file
### ❌ "not_authed" or "invalid_auth"
**Problem:** The bot token is invalid or revoked.
**Fix:**
1. Go to **OAuth & Permissions** → check the Bot User OAuth Token
2. If it says "Not installed", reinstall the app
3. If you recently changed scopes, you need to reinstall
### ❌ Bot doesn't respond in channels
**Problem:** The bot is not invited to the channel, or you're not @mentioning it.
**Fix:**
1. In the Slack channel, type `/invite @Aetheel`
2. Make sure you @mention the bot: `@Aetheel hello`
3. For DMs, just message the bot directly — no @mention needed
### ❌ "channel_not_found" when sending
**Problem:** Using a channel name instead of ID, or bot isn't in the channel.
**Fix:**
1. Use channel **ID** not name. Find it in Slack:
- Right-click the channel name → "View channel details"
- The ID is at the bottom (starts with `C`)
2. Invite the bot to the channel first
### ❌ Socket Mode connection drops
**Problem:** The WebSocket connection is unstable.
**Fix:**
1. Check your internet connection
2. The SDK automatically reconnects — this is usually transient
3. If persistent, check Slack's [status page](https://status.slack.com/)
### ❌ "missing_scope"
**Problem:** The bot token doesn't have the required OAuth scopes.
**Fix:**
1. Go to **OAuth & Permissions****Bot Token Scopes**
2. Add the missing scope mentioned in the error
3. **Reinstall the app** (scope changes require reinstallation)
---
## Architecture Reference
### How It Works
```
┌──────────────────────┐
│ Your Slack │
│ Workspace │
│ │
│ #general │
│ #random │
│ DMs │
└──────┬───────────────┘
│ WebSocket (Socket Mode)
┌──────▼───────────────┐
│ Aetheel Slack │
│ Adapter │
│ │
│ • Token resolution │
│ • Event handling │
│ • Thread isolation │
│ • Message chunking │
│ • User/channel │
│ name resolution │
└──────┬───────────────┘
│ Callback
┌──────▼───────────────┐
│ Message Handler │
│ │
│ • Echo (test) │
│ • Smart (commands) │
│ • AI (future) │
└──────────────────────┘
```
### Key Files
| File | Purpose |
|------|---------|
| `adapters/slack_adapter.py` | Core Slack adapter (Socket Mode, send/receive) |
| `main.py` | Entry point with echo and smart handlers |
| `test_slack.py` | Integration test suite |
| `.env` | Your Slack tokens (not committed to git) |
| `.env.example` | Token template |
| `requirements.txt` | Python dependencies |
### Comparison with OpenClaw
| Feature | OpenClaw (TypeScript) | Aetheel (Python) |
|---------|----------------------|------------------|
| **Library** | `@slack/bolt` | `slack_bolt` (official Python SDK) |
| **Mode** | Socket Mode (`socketMode: true`) | Socket Mode (`SocketModeHandler`) |
| **Auth** | `auth.test()` for identity | `auth_test()` for identity |
| **Sending** | `chat.postMessage` with chunking | `chat_postMessage` with chunking |
| **Threading** | `thread_ts` for conversation isolation | `thread_ts` for conversation isolation |
| **DM Handling** | `conversations.open` for user DMs | `conversations_open` for user DMs |
| **Text Limit** | 4000 chars (chunked) | 4000 chars (chunked) |
| **Config** | JSON5 config file | `.env` file |
| **Accounts** | Multi-account support | Single account (MVP) |
---
## Next Steps
Once the Slack adapter is working, you can:
1. **Connect AI** — Replace the echo handler with an AI-powered handler (Claude API)
2. **Add Memory** — Integrate the memory system for conversation context
3. **Add Heartbeat** — Set up proactive notifications via Slack
4. **Add Skills** — Load skills from the `.claude/skills/` directory
See the main [OpenClaw Analysis](../openclaw-analysis.md) for the full architecture plan.

549
main.py Normal file
View File

@@ -0,0 +1,549 @@
#!/usr/bin/env python3
"""
Aetheel Slack Service — Main Entry Point
=========================================
Starts the Slack adapter in Socket Mode, connected to the OpenCode AI runtime.
Usage:
python main.py # Run with OpenCode AI handler
python main.py --test # Run with echo handler for testing
python main.py --cli # Force CLI mode (subprocess)
python main.py --sdk # Force SDK mode (opencode serve)
Environment:
SLACK_BOT_TOKEN — Slack bot token (xoxb-...)
SLACK_APP_TOKEN — Slack app-level token (xapp-...)
OPENCODE_MODE — "cli" or "sdk" (default: cli)
OPENCODE_MODEL — Model to use (e.g., anthropic/claude-sonnet-4-20250514)
OPENCODE_SERVER_URL — SDK server URL (default: http://localhost:4096)
OPENCODE_TIMEOUT — CLI timeout in seconds (default: 120)
LOG_LEVEL — Optional, default: INFO
"""
import argparse
import asyncio
import logging
import os
import re
import sys
import threading
from datetime import datetime
from dotenv import load_dotenv
# Load .env file
load_dotenv()
from adapters.slack_adapter import SlackAdapter, SlackMessage
from agent.claude_runtime import ClaudeCodeConfig, ClaudeCodeRuntime
from agent.opencode_runtime import (
AgentResponse,
OpenCodeConfig,
OpenCodeRuntime,
RuntimeMode,
build_aetheel_system_prompt,
)
from memory import MemoryManager
from memory.types import MemoryConfig
# Module-level logger for the Aetheel service.
logger = logging.getLogger("aetheel")
# Type alias for either runtime
AnyRuntime = OpenCodeRuntime | ClaudeCodeRuntime
# Global runtime instance (initialized in main)
_runtime: AnyRuntime | None = None
# Global memory manager; None until main() initializes it (or if disabled).
_memory: MemoryManager | None = None
# Global Slack adapter, kept so background reminder timers can post messages.
_slack_adapter: SlackAdapter | None = None
# Regex for parsing action tags from AI responses:
#   [ACTION:remind|<minutes>|<message>]  — group(1)=minutes, group(2)=message.
# re.DOTALL lets the reminder message span multiple lines.
_ACTION_RE = re.compile(r"\[ACTION:remind\|(\d+)\|(.+?)\]", re.DOTALL)
# ---------------------------------------------------------------------------
# Message Handlers
# ---------------------------------------------------------------------------
def echo_handler(msg: "SlackMessage") -> str:
    """
    Simple echo handler for testing.

    Formats the incoming Slack message's metadata (text, sender, channel,
    delivery type, thread, timestamp) into a human-readable mrkdwn reply.
    No AI call is made — this is the handler used by ``python main.py --test``.

    Args:
        msg: The incoming Slack message.

    Returns:
        A multi-line Slack-formatted echo response.
    """
    # Classify the delivery type for display purposes only.
    msg_type = "DM" if msg.is_dm else "Mention" if msg.is_mention else "Channel"
    response_lines = [
        "👋 *Aetheel received your message!*",
        "",
        f"📝 *Text:* {msg.text}",
        f"👤 *From:* {msg.user_name} (`{msg.user_id}`)",
        f"📍 *Channel:* #{msg.channel_name} (`{msg.channel_id}`)",
        f"💬 *Type:* {msg_type}",
        f"🧵 *Thread:* `{msg.conversation_id[:15]}...`",
        f"🕐 *Time:* {msg.timestamp.strftime('%Y-%m-%d %H:%M:%S UTC')}",
        "",
        "_This is an echo response from the Aetheel test handler._",
    ]
    return "\n".join(response_lines)
def _build_memory_context(msg: "SlackMessage") -> str:
    """
    Build memory context to inject into the system prompt.

    Reads identity files (SOUL.md, USER.md, MEMORY.md) via the module-level
    `_memory` manager and searches long-term memory for chunks relevant to
    the user's message.

    Args:
        msg: The incoming Slack message (its ``text`` drives the search).

    Returns:
        Assembled sections joined by ``---`` separators, or ``""`` when the
        memory system is not initialized.
    """
    # `_memory` is only read here, so no `global` declaration is needed.
    if _memory is None:
        return ""
    sections: list[str] = []
    # ── Identity: SOUL.md ──
    soul = _memory.read_soul()
    if soul:
        sections.append(f"# Your Identity (SOUL.md)\n\n{soul}")
    # ── User profile: USER.md ──
    user = _memory.read_user()
    if user:
        sections.append(f"# About the User (USER.md)\n\n{user}")
    # ── Long-term memory: MEMORY.md ──
    ltm = _memory.read_long_term_memory()
    if ltm:
        sections.append(f"# Long-Term Memory (MEMORY.md)\n\n{ltm}")
    # ── Relevant memory search results ──
    try:
        # NOTE(review): asyncio.run() raises RuntimeError if this thread
        # already has a running event loop — handler threads are assumed
        # loop-free here; confirm if handlers ever move onto an event loop.
        results = asyncio.run(_memory.search(msg.text, max_results=3, min_score=0.2))
        if results:
            snippets = []
            for r in results:
                # Skip if it's just the identity files themselves (already included)
                if r.path in ("SOUL.md", "USER.md", "MEMORY.md"):
                    continue
                snippets.append(
                    f"**{r.path}** (lines {r.start_line}-{r.end_line}, "
                    f"relevance {r.score:.0%}):\n{r.snippet[:500]}"
                )
            if snippets:
                sections.append(
                    "# Relevant Memory Context\n\n"
                    + "\n\n---\n\n".join(snippets)
                )
    except Exception as e:
        # Best-effort: a failed memory search must never block the reply.
        logger.debug(f"Memory search failed: {e}")
    return "\n\n---\n\n".join(sections)
def ai_handler(msg: SlackMessage) -> str:
    """
    AI-powered handler using the OpenCode runtime.

    Routes an incoming Slack message through the agent runtime, which
    provides memory context injection (SOUL.md, USER.md, MEMORY.md),
    per-thread session management, model selection, system-prompt
    injection, response generation, and conversation logging.

    Built-in commands (``status``, ``help``, ``time``, ``sessions``)
    are answered directly without touching the AI.
    """
    global _runtime, _memory
    if _runtime is None:
        return "⚠️ AI runtime not initialized. Please restart the service."

    # Built-in commands bypass the AI entirely for instant responses.
    command = msg.text.lower().strip()
    if command in ("status", "/status", "ping"):
        return _format_status()
    if command in ("help", "/help"):
        return _format_help()
    if command == "time":
        return f"🕐 Server time: *{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*"
    if command in ("sessions", "/sessions"):
        return _format_sessions()

    # Assemble identity + relevant-memory context, then ask the runtime.
    memory_ctx = _build_memory_context(msg)
    prompt = build_aetheel_system_prompt(
        user_name=msg.user_name,
        channel_name=msg.channel_name,
        is_dm=msg.is_dm,
        extra_context=memory_ctx,
    )
    result = _runtime.chat(
        message=msg.text,
        conversation_id=msg.conversation_id,
        system_prompt=prompt,
    )

    if not result.ok:
        reason = result.error or "Unknown error"
        logger.error(f"AI error: {reason}")
        lowered = reason.lower()
        # Translate common failure modes into actionable user guidance.
        if "not found" in lowered or "not installed" in lowered:
            return (
                "⚠️ OpenCode CLI is not available.\n"
                "Install it with: `curl -fsSL https://opencode.ai/install | bash`\n"
                "See `docs/opencode-setup.md` for details."
            )
        if "timeout" in lowered:
            return (
                "⏳ The AI took too long to respond. "
                "Try a shorter or simpler question."
            )
        return f"⚠️ AI error: {reason[:200]}"

    # Log response stats
    logger.info(
        f"🤖 AI response: {len(result.text)} chars, "
        f"{result.duration_ms}ms"
    )

    # Execute and strip any [ACTION:...] tags embedded in the reply.
    reply = _process_action_tags(result.text, msg)

    # Best-effort: append this exchange to the daily session log.
    if _memory:
        try:
            channel = "dm" if msg.is_dm else msg.channel_name or "slack"
            _memory.log_session(
                f"**User ({msg.user_name}):** {msg.text}\n\n"
                f"**Aetheel:** {reply}",
                channel=channel,
            )
        except Exception as e:
            logger.debug(f"Session logging failed: {e}")

    return reply
# ---------------------------------------------------------------------------
# Action Tag Processing
# ---------------------------------------------------------------------------
def _process_action_tags(text: str, msg: SlackMessage) -> str:
    """
    Parse and execute action tags embedded in an AI response.

    Supported tags:
        [ACTION:remind|<minutes>|<message>]

    Each tag is stripped from the visible reply whether or not the
    corresponding action could be scheduled.

    Returns:
        The response text with all action tags removed.
    """
    visible = text
    for tag in _ACTION_RE.finditer(text):
        delay_raw, body = tag.group(1), tag.group(2)
        try:
            minutes = int(delay_raw)
            _schedule_reminder(
                delay_minutes=minutes,
                message=body.strip(),
                channel_id=msg.channel_id,
                # thread_ts may not exist on every message object.
                thread_ts=getattr(msg, "thread_ts", None),
                user_name=msg.user_name,
            )
            logger.info(
                f"⏰ Reminder scheduled: '{body.strip()[:50]}' "
                f"in {minutes} min for #{msg.channel_name}"
            )
        except Exception as e:
            logger.warning(f"Failed to schedule reminder: {e}")
        # Remove the tag from the user-visible reply either way.
        visible = visible.replace(tag.group(0), "").strip()
    return visible
def _schedule_reminder(
    *,
    delay_minutes: int,
    message: str,
    channel_id: str,
    thread_ts: str | None = None,
    user_name: str | None = None,
) -> None:
    """
    Schedule a Slack message to be sent after a delay.

    Uses a daemon threading.Timer so pending reminders never block process
    shutdown (reminders are lost if the process exits before they fire).

    Args:
        delay_minutes: Minutes to wait before posting.
        message: Reminder body text.
        channel_id: Slack channel to post into.
        thread_ts: Optional thread timestamp to reply within.
        user_name: Optional user name to @-mention in the reminder.
    """
    # No `global` statement needed: _slack_adapter is only read, never
    # rebound, inside this function.
    delay_seconds = delay_minutes * 60

    def _send_reminder() -> None:
        # Runs on the timer thread; failures are logged, never raised.
        try:
            if _slack_adapter and _slack_adapter._app:
                mention = f"@{user_name}" if user_name else ""
                reminder_text = f"⏰ *Reminder* {mention}: {message}"
                kwargs = {
                    "channel": channel_id,
                    "text": reminder_text,
                }
                if thread_ts:
                    kwargs["thread_ts"] = thread_ts
                _slack_adapter._app.client.chat_postMessage(**kwargs)
                logger.info(f"⏰ Reminder sent: '{message[:50]}'")
            else:
                logger.warning("Cannot send reminder: Slack adapter not available")
        except Exception as e:
            logger.error(f"Failed to send reminder: {e}")

    timer = threading.Timer(delay_seconds, _send_reminder)
    timer.daemon = True
    timer.start()
# ---------------------------------------------------------------------------
# Formatting Helpers
# ---------------------------------------------------------------------------
def _format_status() -> str:
    """
    Format the /status response with runtime info.

    Reports mode/model/provider/session stats from the runtime when it is
    initialized, plus the current server time.
    """
    # No `global` needed: _runtime is only read here.
    lines = [
        "🟢 *Aetheel is online*",
        "",
    ]
    if _runtime:
        status = _runtime.get_status()
        lines.extend([
            f"• *Mode:* {status['mode']}",
            f"• *Model:* {status['model']}",
            f"• *Provider:* {status['provider']}",
            f"• *Active Sessions:* {status['active_sessions']}",
            # Fix: both branches of this flag were empty strings, so the
            # availability indicator rendered as nothing — use explicit marks.
            f"• *OpenCode Available:* {'✅' if status['opencode_available'] else '❌'}",
        ])
        if "sdk_connected" in status:
            lines.append(
                f"• *SDK Connected:* {'✅' if status['sdk_connected'] else '❌'}"
            )
    else:
        lines.append("• Runtime: not initialized")
    lines.extend([
        "",
        f"• *Time:* {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
    ])
    return "\n".join(lines)
def _format_help() -> str:
"""Format the /help response."""
return (
"🦾 *Aetheel — AI-Powered Assistant*\n"
"\n"
"*Built-in Commands:*\n"
"• `status` — Check bot and AI runtime status\n"
"• `help` — Show this help message\n"
"• `time` — Current server time\n"
"• `sessions` — Active session count\n"
"\n"
"*AI Chat:*\n"
"• Send any message and the AI will respond\n"
"• Each thread maintains its own conversation\n"
"• DMs work too — just message me directly\n"
"\n"
"_Powered by OpenCode — https://opencode.ai_"
)
def _format_sessions() -> str:
    """
    Format session info: the active session count plus how many stale
    sessions were cleaned up during this call.
    """
    # No `global` needed: _runtime is only read here.
    if not _runtime:
        return "⚠️ Runtime not initialized."
    # Count is read before cleanup, so it includes the stale sessions
    # that cleanup_sessions() removes below.
    count = _runtime.get_status()["active_sessions"]
    cleaned = _runtime.cleanup_sessions()
    return (
        f"🧵 *Active Sessions:* {count}\n"
        f"🧹 *Cleaned up:* {cleaned} stale sessions"
    )
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main() -> None:
    """
    CLI entry point for the Aetheel Slack service.

    Parses command-line flags, configures logging, validates Slack tokens,
    initializes the memory system and AI runtime (into module globals),
    registers the message handler on the Slack adapter, then blocks on the
    Socket Mode connection until interrupted.
    """
    parser = argparse.ArgumentParser(
        description="Aetheel Slack Service — AI-Powered via OpenCode or Claude Code",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python main.py                 Start with AI handler (OpenCode)
  python main.py --claude        Start with Claude Code runtime
  python main.py --test          Start with echo-only handler
  python main.py --cli           Force CLI mode (subprocess, OpenCode)
  python main.py --sdk           Force SDK mode (opencode serve)
  python main.py --model anthropic/claude-sonnet-4-20250514
  python main.py --log DEBUG     Start with debug logging
""",
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="Use simple echo handler for testing",
    )
    parser.add_argument(
        "--claude",
        action="store_true",
        help="Use Claude Code runtime instead of OpenCode",
    )
    parser.add_argument(
        "--cli",
        action="store_true",
        help="Force CLI mode (opencode run subprocess)",
    )
    parser.add_argument(
        "--sdk",
        action="store_true",
        help="Force SDK mode (opencode serve API)",
    )
    parser.add_argument(
        "--model",
        default=None,
        help="Model to use (e.g., anthropic/claude-sonnet-4-20250514)",
    )
    parser.add_argument(
        "--log",
        default=os.environ.get("LOG_LEVEL", "INFO"),
        help="Log level (DEBUG, INFO, WARNING, ERROR)",
    )
    args = parser.parse_args()
    # Configure logging (invalid --log values silently fall back to INFO).
    logging.basicConfig(
        level=getattr(logging, args.log.upper(), logging.INFO),
        format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    # Validate Slack tokens are present — fail fast before any setup work.
    if not os.environ.get("SLACK_BOT_TOKEN"):
        print("❌ SLACK_BOT_TOKEN is not set!")
        print("   Copy .env.example to .env and add your tokens.")
        print("   See docs/slack-setup.md for instructions.")
        sys.exit(1)
    if not os.environ.get("SLACK_APP_TOKEN"):
        print("❌ SLACK_APP_TOKEN is not set!")
        print("   Copy .env.example to .env and add your tokens.")
        print("   See docs/slack-setup.md for instructions.")
        sys.exit(1)
    # Initialize memory system (optional — the bot still runs without it).
    global _runtime, _memory
    workspace_dir = os.environ.get(
        "AETHEEL_WORKSPACE", os.path.expanduser("~/.aetheel/workspace")
    )
    db_path = os.environ.get(
        "AETHEEL_MEMORY_DB", os.path.expanduser("~/.aetheel/memory.db")
    )
    try:
        mem_config = MemoryConfig(
            workspace_dir=workspace_dir,
            db_path=db_path,
        )
        _memory = MemoryManager(mem_config)
        logger.info(
            f"Memory system initialized: workspace={workspace_dir}"
        )
        # Initial sync (indexes identity files on first run)
        stats = asyncio.run(_memory.sync())
        logger.info(
            f"Memory sync: {stats.get('files_indexed', 0)} files indexed, "
            f"{stats.get('chunks_created', 0)} chunks"
        )
    except Exception as e:
        # Memory is best-effort: any init failure degrades to no-recall mode.
        logger.warning(f"Memory system init failed (continuing without): {e}")
        _memory = None
    # Initialize AI runtime (unless in test mode)
    if not args.test:
        if args.claude:
            # Claude Code runtime
            claude_config = ClaudeCodeConfig.from_env()
            if args.model:
                claude_config.model = args.model
            _runtime = ClaudeCodeRuntime(claude_config)
            runtime_label = f"claude-code, model={claude_config.model or 'default'}"
        else:
            # OpenCode runtime (default)
            config = OpenCodeConfig.from_env()
            # CLI flag overrides take precedence over env configuration.
            if args.cli:
                config.mode = RuntimeMode.CLI
            elif args.sdk:
                config.mode = RuntimeMode.SDK
            if args.model:
                config.model = args.model
            _runtime = OpenCodeRuntime(config)
            runtime_label = (
                f"opencode/{config.mode.value}, "
                f"model={config.model or 'default'}"
            )
    # Create Slack adapter and expose it for the reminder scheduler.
    global _slack_adapter
    adapter = SlackAdapter(log_level=args.log)
    _slack_adapter = adapter
    # Register handler (runtime_label is always set when not in test mode).
    if args.test:
        adapter.on_message(echo_handler)
        logger.info("Using echo handler (test mode)")
    else:
        adapter.on_message(ai_handler)
        logger.info(f"Using AI handler ({runtime_label})")
    # Start file watching for automatic memory re-indexing
    if _memory:
        _memory.start_watching()
    # Start (blocking) — returns only on interrupt or fatal error.
    try:
        adapter.start()
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C: close memory first, then the adapter.
        if _memory:
            _memory.close()
        adapter.stop()
    except Exception as e:
        if _memory:
            _memory.close()
        logger.error(f"Fatal error: {e}", exc_info=True)
        sys.exit(1)
# Script entry point — run the Slack service when executed directly.
if __name__ == "__main__":
    main()

25
memory/__init__.py Normal file
View File

@@ -0,0 +1,25 @@
"""
Aetheel Memory System
=====================
Hybrid search memory with SQLite + markdown + local embeddings.
Inspired by OpenClaw's memory architecture (src/memory/):
• Identity files: SOUL.md, USER.md, MEMORY.md
• SQLite storage: chunks, FTS5, vector similarity
• Hybrid search: vector (0.7) + BM25 keyword (0.3)
• Local embeddings: fastembed ONNX (384-dim, zero API calls)
• File watching: auto re-index on workspace changes
• Session logs: daily/ conversation transcripts
Usage:
from memory import MemoryManager
manager = MemoryManager(workspace_dir="~/.aetheel/workspace")
await manager.sync()
results = await manager.search("what are my preferences?")
"""
from memory.manager import MemoryManager
from memory.types import MemorySearchResult, MemorySource
__all__ = ["MemoryManager", "MemorySearchResult", "MemorySource"]

88
memory/embeddings.py Normal file
View File

@@ -0,0 +1,88 @@
"""
Embedding provider for the memory system.
Uses fastembed (ONNX) for fully local, zero-API-call embeddings.
Inspired by OpenClaw's src/memory/embeddings.ts, simplified to:
• Single provider: fastembed with BAAI/bge-small-en-v1.5 (384-dim)
• Local only — no OpenAI/Voyage/Gemini API calls
• Thread-safe lazy initialization
"""
import logging
import threading
from memory.internal import normalize_embedding
logger = logging.getLogger("aetheel.memory.embeddings")
# The fastembed model is loaded lazily on first use
_model_lock = threading.Lock()
_model = None
_model_name: str | None = None
def _ensure_model(model_name: str = "BAAI/bge-small-en-v1.5"):
    """
    Lazy-load the fastembed model (thread-safe).

    Uses double-checked locking: an unlocked fast path returns the cached
    model when it is already loaded for *model_name*; otherwise the lock is
    taken and the check repeated before loading. Requesting a different
    model name replaces the cached global model.

    Raises:
        ImportError: if the optional fastembed package is not installed.
    """
    global _model, _model_name
    # Fast path without the lock — safe to read under CPython; a stale
    # miss just falls through to the locked path.
    if _model is not None and _model_name == model_name:
        return _model
    with _model_lock:
        # Double-check after acquiring lock
        if _model is not None and _model_name == model_name:
            return _model
        try:
            # Imported lazily so the memory system can load without
            # fastembed installed (until embeddings are actually needed).
            from fastembed import TextEmbedding
        except ImportError:
            raise ImportError(
                "fastembed is required for local embeddings.\n"
                "Install with: uv add fastembed\n"
                "Or: pip install fastembed"
            )
        logger.info(f"Loading embedding model: {model_name}...")
        _model = TextEmbedding(model_name=model_name)
        _model_name = model_name
        logger.info(f"Embedding model loaded: {model_name}")
        return _model
def embed_query(text: str, model_name: str = "BAAI/bge-small-en-v1.5") -> list[float]:
    """
    Embed a single query string.

    Returns an L2-normalized vector (384 dims for the default model), or
    an empty list when the model produces no output.
    """
    provider = _ensure_model(model_name)
    produced = list(provider.query_embed([text]))
    if produced:
        return normalize_embedding(produced[0].tolist())
    return []
def embed_batch(
    texts: list[str],
    model_name: str = "BAAI/bge-small-en-v1.5",
) -> list[list[float]]:
    """
    Embed a batch of passage strings.

    Returns one L2-normalized vector per input text. An empty input list
    yields an empty result without loading the model.
    """
    if not texts:
        return []
    provider = _ensure_model(model_name)
    return [
        normalize_embedding(vector.tolist())
        for vector in provider.passage_embed(texts)
    ]
def get_embedding_dims(model_name: str = "BAAI/bge-small-en-v1.5") -> int:
    """
    Return the output dimensionality for a known embedding model.

    Unknown model names fall back to 384 (the small-model default).
    """
    known = {
        "BAAI/bge-small-en-v1.5": 384,
        "BAAI/bge-base-en-v1.5": 768,
        "sentence-transformers/all-MiniLM-L6-v2": 384,
    }
    try:
        return known[model_name]
    except KeyError:
        return 384

111
memory/hybrid.py Normal file
View File

@@ -0,0 +1,111 @@
"""
Hybrid search — merges vector similarity + BM25 keyword results.
Direct port of OpenClaw's src/memory/hybrid.ts.
The algorithm:
1. Run vector search → ranked by cosine similarity
2. Run FTS5 keyword search → ranked by BM25
3. Merge by weighted score: 0.7 × vector + 0.3 × keyword
4. Deduplicate by chunk ID
5. Sort by combined score (descending)
"""
import re
from memory.types import MemorySearchResult, MemorySource
def build_fts_query(raw: str) -> str | None:
"""
Build an FTS5 match query from raw text.
Port of OpenClaw's buildFtsQuery() — quotes each token
and joins with AND for a conjunctive match.
Example: "hello world"'"hello" AND "world"'
"""
tokens = re.findall(r"[A-Za-z0-9_]+", raw)
if not tokens:
return None
quoted = [f'"{t}"' for t in tokens]
return " AND ".join(quoted)
def bm25_rank_to_score(rank: float) -> float:
    """
    Convert FTS5 BM25 rank (negative = better) to a 0-1 score.
    Port of OpenClaw's bm25RankToScore().

    FTS5 reports more-negative ranks for better matches; the magnitude is
    folded into 1 / (1 + |rank|), so rank 0 maps to 1.0 and larger
    magnitudes approach 0.0. Non-numeric input is treated as a very poor
    match (score 0.001).

    Fix: the previous `max(0.0, rank)` clamped every raw negative FTS5
    rank to score 1.0, contradicting the documented contract; abs() scores
    them correctly. Callers that pre-apply abs() see identical results.
    """
    normalized = abs(rank) if isinstance(rank, (int, float)) else 999.0
    return 1.0 / (1.0 + normalized)
def merge_hybrid_results(
    vector: list[dict],
    keyword: list[dict],
    vector_weight: float = 0.7,
    text_weight: float = 0.3,
) -> list[MemorySearchResult]:
    """
    Merge vector and keyword search hits into a single ranked list.
    Direct port of OpenClaw's mergeHybridResults() from hybrid.ts.

    Scores combine as vector_weight * vector_score + text_weight *
    text_score. Hits present in both lists are deduplicated by chunk id
    (the keyword snippet wins when available). The merged list is sorted
    by combined score, highest first.

    Vector dicts carry: id, path, start_line, end_line, source, snippet,
    vector_score. Keyword dicts carry the same fields with text_score.
    """
    combined: dict[str, dict] = {}

    # Seed the map with vector hits (text_score defaults to zero).
    for hit in vector:
        combined[hit["id"]] = {
            "id": hit["id"],
            "path": hit["path"],
            "start_line": hit["start_line"],
            "end_line": hit["end_line"],
            "source": hit["source"],
            "snippet": hit["snippet"],
            "vector_score": hit.get("vector_score", 0.0),
            "text_score": 0.0,
        }

    # Fold in keyword hits: merge onto existing entries or add new ones.
    for hit in keyword:
        entry = combined.get(hit["id"])
        if entry is None:
            combined[hit["id"]] = {
                "id": hit["id"],
                "path": hit["path"],
                "start_line": hit["start_line"],
                "end_line": hit["end_line"],
                "source": hit["source"],
                "snippet": hit["snippet"],
                "vector_score": 0.0,
                "text_score": hit.get("text_score", 0.0),
            }
        else:
            entry["text_score"] = hit.get("text_score", 0.0)
            # Keyword snippets tend to be more on-topic, so prefer them.
            if hit.get("snippet"):
                entry["snippet"] = hit["snippet"]

    # Weighted combination, converted to result objects.
    results: list[MemorySearchResult] = []
    for entry in combined.values():
        raw_source = entry["source"]
        results.append(MemorySearchResult(
            path=entry["path"],
            start_line=entry["start_line"],
            end_line=entry["end_line"],
            score=(
                vector_weight * entry["vector_score"]
                + text_weight * entry["text_score"]
            ),
            snippet=entry["snippet"],
            source=(
                MemorySource(raw_source)
                if isinstance(raw_source, str)
                else raw_source
            ),
        ))

    # Highest combined score first (stable sort preserves insertion order
    # for ties: vector-seeded entries before keyword-only ones).
    return sorted(results, key=lambda r: r.score, reverse=True)

214
memory/internal.py Normal file
View File

@@ -0,0 +1,214 @@
"""
Internal utilities for the memory system.
Port of OpenClaw's src/memory/internal.ts:
• hashText — SHA-256 content hashing
• chunkMarkdown — split markdown into overlapping chunks
• listMemoryFiles — discover .md files in workspace
• buildFileEntry — create MemoryFileEntry from a file
• cosineSimilarity — vector similarity calculation
"""
import hashlib
import os
from pathlib import Path
from memory.types import MemoryChunk, MemoryFileEntry
def hash_text(value: str) -> str:
    """Return the hex SHA-256 digest of *value* (UTF-8 encoded). Mirrors OpenClaw's hashText()."""
    digest = hashlib.sha256()
    digest.update(value.encode("utf-8"))
    return digest.hexdigest()
def chunk_markdown(
    content: str,
    chunk_tokens: int = 512,
    chunk_overlap: int = 50,
) -> list[MemoryChunk]:
    """
    Split markdown content into overlapping chunks.
    Direct port of OpenClaw's chunkMarkdown() from internal.ts.
    Uses character-based approximation: ~4 chars per token.

    Args:
        content: Raw markdown text.
        chunk_tokens: Approximate chunk size in tokens (~4 chars each).
        chunk_overlap: Approximate overlap carried from the tail of one
            chunk into the next, in tokens.

    Returns:
        MemoryChunk objects with 1-indexed start/end line numbers and a
        SHA-256 hash of each chunk's text.
    """
    lines = content.split("\n")
    if not lines:  # defensive: str.split always yields >= 1 element
        return []
    # Token budgets approximated at 4 characters per token; min 32 chars.
    max_chars = max(32, chunk_tokens * 4)
    overlap_chars = max(0, chunk_overlap * 4)
    chunks: list[MemoryChunk] = []
    current: list[tuple[str, int]] = []  # (line_text, 1-indexed line_no)
    current_chars = 0
    def flush() -> None:
        # Emit the accumulated lines as one chunk (no-op when empty).
        nonlocal current, current_chars
        if not current:
            return
        text = "\n".join(line for line, _ in current)
        start_line = current[0][1]
        end_line = current[-1][1]
        chunks.append(MemoryChunk(
            start_line=start_line,
            end_line=end_line,
            text=text,
            hash=hash_text(text),
        ))
    def carry_overlap() -> None:
        # Seed the next chunk with the tail (~overlap_chars worth of
        # lines) of the one just flushed, for context continuity.
        nonlocal current, current_chars
        if overlap_chars <= 0 or not current:
            current = []
            current_chars = 0
            return
        acc = 0
        kept: list[tuple[str, int]] = []
        for line_text, line_no in reversed(current):
            acc += len(line_text) + 1  # +1 accounts for the newline
            kept.insert(0, (line_text, line_no))
            if acc >= overlap_chars:
                break
        current = kept
        current_chars = sum(len(lt) + 1 for lt, _ in kept)
    for i, line in enumerate(lines):
        line_no = i + 1
        # Handle very long lines by splitting into segments; each segment
        # keeps the original line number.
        segments = [""] if not line else [
            line[start:start + max_chars]
            for start in range(0, len(line), max_chars)
        ]
        for segment in segments:
            line_size = len(segment) + 1  # +1 accounts for the newline
            # Flush before the segment that would overflow the budget.
            if current_chars + line_size > max_chars and current:
                flush()
                carry_overlap()
            current.append((segment, line_no))
            current_chars += line_size
    flush()  # emit the final partial chunk
    return chunks
def list_memory_files(
workspace_dir: str,
extra_paths: list[str] | None = None,
) -> list[str]:
"""
List all markdown files in the workspace memory directory.
Port of OpenClaw's listMemoryFiles() from internal.ts.
Searches for:
- MEMORY.md (or memory.md) in workspace root
- All .md files in memory/ subdirectory
- Any additional paths specified
"""
result: list[str] = []
ws = Path(workspace_dir).expanduser().resolve()
# Check MEMORY.md and memory.md in workspace root
for name in ("MEMORY.md", "memory.md"):
candidate = ws / name
if candidate.is_file() and not candidate.is_symlink():
result.append(str(candidate))
# Check SOUL.md and USER.md (identity files)
for name in ("SOUL.md", "USER.md"):
candidate = ws / name
if candidate.is_file() and not candidate.is_symlink():
result.append(str(candidate))
# Walk memory/ subdirectory
memory_dir = ws / "memory"
if memory_dir.is_dir() and not memory_dir.is_symlink():
_walk_md_files(memory_dir, result)
# Extra paths
if extra_paths:
for extra in extra_paths:
p = Path(extra).expanduser().resolve()
if p.is_symlink():
continue
if p.is_dir():
_walk_md_files(p, result)
elif p.is_file() and p.suffix == ".md":
result.append(str(p))
# Deduplicate by resolved path
seen: set[str] = set()
deduped: list[str] = []
for entry in result:
real = os.path.realpath(entry)
if real not in seen:
seen.add(real)
deduped.append(entry)
return deduped
def _walk_md_files(directory: Path, result: list[str]) -> None:
"""Recursively collect .md files from a directory."""
try:
for entry in sorted(directory.iterdir()):
if entry.is_symlink():
continue
if entry.is_dir():
_walk_md_files(entry, result)
elif entry.is_file() and entry.suffix == ".md":
result.append(str(entry))
except PermissionError:
pass
def build_file_entry(abs_path: str, workspace_dir: str) -> MemoryFileEntry:
    """
    Build a MemoryFileEntry describing the file at *abs_path*.
    Port of OpenClaw's buildFileEntry() from internal.ts.

    The entry records the workspace-relative path (normalized to forward
    slashes), mtime in milliseconds, file size, and a SHA-256 hash of the
    file's contents.
    """
    info = os.stat(abs_path)
    with open(abs_path, "r", encoding="utf-8") as fh:
        body = fh.read()
    relative = os.path.relpath(abs_path, workspace_dir).replace("\\", "/")
    return MemoryFileEntry(
        path=relative,
        abs_path=abs_path,
        mtime_ms=info.st_mtime * 1000,
        size=info.st_size,
        hash=hash_text(body),
    )
def cosine_similarity(a: list[float], b: list[float]) -> float:
    """
    Cosine similarity of two vectors, truncated to the shorter length.
    Port of OpenClaw's cosineSimilarity() from internal.ts.

    Returns 0.0 when either vector is empty or has zero magnitude.
    """
    if not a or not b:
        return 0.0
    dot = norm_a = norm_b = 0.0
    # zip() truncates to the shorter vector, matching min(len(a), len(b)).
    for av, bv in zip(a, b):
        dot += av * bv
        norm_a += av * av
        norm_b += bv * bv
    if norm_a == 0.0 or norm_b == 0.0:
        return 0.0
    return dot / (norm_a ** 0.5 * norm_b ** 0.5)
def normalize_embedding(vec: list[float]) -> list[float]:
    """
    L2-normalize a vector, replacing NaN/non-numeric entries with 0.0.
    Port of OpenClaw's sanitizeAndNormalizeEmbedding().

    Near-zero vectors (magnitude below 1e-10) are returned sanitized but
    unscaled, to avoid dividing by (nearly) zero.
    """
    cleaned: list[float] = []
    for v in vec:
        # `v == v` is False only for NaN.
        if isinstance(v, (int, float)) and v == v:
            cleaned.append(v)
        else:
            cleaned.append(0.0)
    magnitude = sum(c * c for c in cleaned) ** 0.5
    if magnitude < 1e-10:
        return cleaned
    return [c / magnitude for c in cleaned]

839
memory/manager.py Normal file
View File

@@ -0,0 +1,839 @@
"""
MemoryManager — the main memory system orchestrator.
Port of OpenClaw's MemoryIndexManager (src/memory/manager.ts, 2,300 LOC).
Lifecycle: sync → chunk → embed → store → search
Key features:
• Incremental sync — only re-indexes changed files (hash-based)
• Hybrid search — vector (0.7) + BM25 keyword (0.3)
• File watching — auto re-index on workspace changes (via watchdog)
• Embedding cache — avoids re-computing embeddings for unchanged chunks
• Session log indexing — indexes daily/ conversation transcripts
"""
import json
import logging
import os
import sqlite3
import threading
import time
import uuid
from pathlib import Path
from memory.embeddings import embed_batch, embed_query, get_embedding_dims
from memory.hybrid import bm25_rank_to_score, build_fts_query, merge_hybrid_results
from memory.internal import (
build_file_entry,
chunk_markdown,
hash_text,
list_memory_files,
)
from memory.schema import ensure_schema
from memory.types import (
MemoryConfig,
MemorySearchResult,
MemorySource,
)
logger = logging.getLogger("aetheel.memory")
SNIPPET_MAX_CHARS = 700
class MemoryManager:
"""
Main memory system — manages the full lifecycle:
sync → chunk → embed → store → search
Inspired by OpenClaw's MemoryIndexManager.
"""
    def __init__(self, config: MemoryConfig | None = None):
        """
        Open (or create) the memory workspace and its SQLite index.

        Resolves workspace/db/session-log paths, creates any missing
        directories, opens the SQLite database with the schema applied,
        and bootstraps default identity files on first run.

        Args:
            config: Optional MemoryConfig; defaults to MemoryConfig().
        """
        self._config = config or MemoryConfig()
        self._workspace_dir = str(
            Path(self._config.workspace_dir).expanduser().resolve()
        )
        self._db_path = str(Path(self._config.db_path).expanduser().resolve())
        # Session logs default to <workspace>/daily unless configured.
        self._sessions_dir = (
            str(Path(self._config.sessions_dir).expanduser().resolve())
            if self._config.sessions_dir
            else os.path.join(self._workspace_dir, "daily")
        )
        # Ensure directories exist
        os.makedirs(self._workspace_dir, exist_ok=True)
        os.makedirs(self._sessions_dir, exist_ok=True)
        os.makedirs(os.path.dirname(self._db_path), exist_ok=True)
        # Open database and ensure schema.
        # NOTE(review): check_same_thread=False — presumably so background
        # watcher/sync threads can share this connection; confirm all
        # access is serialized (e.g. via _sync_lock).
        self._db = sqlite3.connect(self._db_path, check_same_thread=False)
        self._db.row_factory = sqlite3.Row
        schema_result = ensure_schema(self._db, fts_enabled=True)
        self._fts_available = schema_result.get("fts_available", False)
        # State
        self._dirty = True  # workspace may be out of sync with the index
        self._syncing = False  # re-entrancy guard for sync()
        self._sync_lock = threading.Lock()
        self._closed = False
        self._watcher = None  # file-watcher handle; not started here
        # Create default identity files if they don't exist
        self._ensure_identity_files()
        logger.info(
            f"MemoryManager initialized: workspace={self._workspace_dir}, "
            f"db={self._db_path}, fts={self._fts_available}"
        )
# ── Identity File Bootstrap ──────────────────────────────────────
    def _ensure_identity_files(self) -> None:
        """
        Create default SOUL.md, USER.md, MEMORY.md if they don't exist.

        Existing files are never overwritten — the defaults below only
        seed a brand-new workspace.
        """
        defaults = {
            "SOUL.md": (
                "# SOUL.md — Who You Are\n\n"
                "_You're not a chatbot. You're becoming someone._\n\n"
                "## Core Truths\n\n"
                "**Be genuinely helpful, not performatively helpful.** "
                "Skip the filler — just help.\n\n"
                "**Have opinions.** You're allowed to disagree, prefer things, "
                "find stuff amusing or boring.\n\n"
                "**Be resourceful before asking.** Try to figure it out first. "
                "Then ask if you're stuck.\n\n"
                "**Earn trust through competence.** Be careful with external actions. "
                "Be bold with internal ones.\n\n"
                "## Boundaries\n\n"
                "- Private things stay private. Period.\n"
                "- When in doubt, ask before acting externally.\n"
                "- Never send half-baked replies.\n\n"
                "## Continuity\n\n"
                "Each session, you wake up fresh. These files _are_ your memory. "
                "Read them. Update them. They're how you persist.\n\n"
                "---\n\n"
                "_This file is yours to evolve. As you learn who you are, update it._\n"
            ),
            "USER.md": (
                "# USER.md — Who I Am\n\n"
                "## About Me\n\n"
                "<!-- Fill in your details -->\n"
                "- **Name:** \n"
                "- **Role:** \n"
                "- **Location:** \n"
                "- **Timezone:** \n\n"
                "## Preferences\n\n"
                "<!-- How you like to communicate -->\n"
                "- **Communication style:** \n"
                "- **Response length:** \n"
                "- **Technical level:** \n\n"
                "## Current Focus\n\n"
                "<!-- What you're working on -->\n\n"
                "## Tools & Services\n\n"
                "<!-- Services you use regularly -->\n\n"
                "---\n\n"
                "_Update this file as your preferences evolve._\n"
            ),
            "MEMORY.md": (
                "# MEMORY.md — Long-Term Memory\n\n"
                "## Decisions & Lessons\n\n"
                "<!-- Record important decisions and lessons learned -->\n\n"
                "## Context\n\n"
                "<!-- Persistent context that should carry across sessions -->\n\n"
                "## Notes\n\n"
                "<!-- Anything worth remembering -->\n\n"
                "---\n\n"
                "_This file persists across sessions. "
                "Update it when you learn something important._\n"
            ),
        }
        for filename, content in defaults.items():
            filepath = os.path.join(self._workspace_dir, filename)
            # Only seed missing files; user edits are never clobbered.
            if not os.path.exists(filepath):
                with open(filepath, "w", encoding="utf-8") as f:
                    f.write(content)
                logger.info(f"Created default identity file: {filepath}")
# ── Search ───────────────────────────────────────────────────────
    async def search(
        self,
        query: str,
        *,
        max_results: int | None = None,
        min_score: float | None = None,
    ) -> list[MemorySearchResult]:
        """
        Search memory using hybrid vector + keyword search.
        Port of OpenClaw's MemoryIndexManager.search().
        Steps:
        1. (Optional) Trigger sync if dirty
        2. Run FTS5 keyword search → BM25 scored
        3. Generate query embedding → vector search
        4. Merge results with weighted scoring (0.7v + 0.3k)
        5. Filter by min_score and return top-N results

        Args:
            query: Free-form search text; blank queries return [].
            max_results: Cap on returned results (config default if None).
            min_score: Minimum combined score (config default if None).
        """
        # Auto-sync if dirty
        if self._config.sync_on_search and self._dirty:
            await self.sync()
        cleaned = query.strip()
        if not cleaned:
            return []
        max_r = max_results or self._config.max_results
        min_s = min_score if min_score is not None else self._config.min_score
        # Over-fetch candidates (3x, capped at 200) so post-merge score
        # filtering can still fill max_r results.
        candidates = min(200, max(1, max_r * 3))
        # Keyword search (BM25)
        keyword_results = self._search_keyword(cleaned, candidates)
        # Vector search — degrade gracefully to keyword-only if the
        # embedder fails or returns an all-zero vector.
        try:
            query_vec = embed_query(cleaned, self._config.embedding_model)
            has_vector = any(v != 0 for v in query_vec)
        except Exception as e:
            logger.warning(f"Embedding failed, falling back to keyword-only: {e}")
            query_vec = []
            has_vector = False
        vector_results = (
            self._search_vector(query_vec, candidates) if has_vector else []
        )
        # If no keyword results, return vector-only
        if not keyword_results:
            return [
                r for r in self._vector_to_search_results(vector_results)
                if r.score >= min_s
            ][:max_r]
        # Merge hybrid results
        merged = merge_hybrid_results(
            vector=vector_results,
            keyword=keyword_results,
            vector_weight=self._config.vector_weight,
            text_weight=self._config.text_weight,
        )
        return [r for r in merged if r.score >= min_s][:max_r]
def _search_vector(
self, query_vec: list[float], limit: int
) -> list[dict]:
"""
Search chunks by vector cosine similarity.
Uses embedding stored as JSON in the chunks table.
"""
if not query_vec:
return []
try:
rows = self._db.execute(
"SELECT id, path, start_line, end_line, source, text, embedding "
"FROM chunks ORDER BY rowid"
).fetchall()
except Exception as e:
logger.warning(f"Vector search failed: {e}")
return []
from memory.internal import cosine_similarity
results = []
for row in rows:
try:
stored_vec = json.loads(row["embedding"])
if not stored_vec:
continue
score = cosine_similarity(query_vec, stored_vec)
snippet = row["text"][:SNIPPET_MAX_CHARS]
results.append({
"id": row["id"],
"path": row["path"],
"start_line": row["start_line"],
"end_line": row["end_line"],
"source": row["source"],
"snippet": snippet,
"vector_score": max(0.0, score),
})
except (json.JSONDecodeError, TypeError):
continue
results.sort(key=lambda r: r["vector_score"], reverse=True)
return results[:limit]
def _search_keyword(self, query: str, limit: int) -> list[dict]:
"""
Search chunks using FTS5 full-text search with BM25 ranking.
Port of OpenClaw's searchKeyword().
"""
if not self._fts_available:
return []
fts_query = build_fts_query(query)
if not fts_query:
return []
try:
rows = self._db.execute(
"SELECT id, path, start_line, end_line, source, text, "
"rank AS bm25_rank "
"FROM chunks_fts "
"WHERE chunks_fts MATCH ? "
"ORDER BY rank "
"LIMIT ?",
(fts_query, limit),
).fetchall()
except Exception as e:
logger.debug(f"FTS search failed for query '{fts_query}': {e}")
return []
results = []
for row in rows:
# FTS5 rank is negative (lower = better), convert to 0-1 score
bm25_rank = abs(row["bm25_rank"]) if row["bm25_rank"] else 999.0
text_score = bm25_rank_to_score(bm25_rank)
snippet = row["text"][:SNIPPET_MAX_CHARS]
results.append({
"id": row["id"],
"path": row["path"],
"start_line": row["start_line"],
"end_line": row["end_line"],
"source": row["source"],
"snippet": snippet,
"text_score": text_score,
})
return results
def _vector_to_search_results(
self, vector_results: list[dict]
) -> list[MemorySearchResult]:
"""Convert raw vector results to MemorySearchResult objects."""
return [
MemorySearchResult(
path=r["path"],
start_line=r["start_line"],
end_line=r["end_line"],
score=r["vector_score"],
snippet=r["snippet"],
source=MemorySource(r["source"]),
)
for r in vector_results
]
# ── Sync ─────────────────────────────────────────────────────────
    async def sync(self, *, force: bool = False) -> dict:
        """
        Synchronize workspace markdown files into the index.
        Port of OpenClaw's MemoryIndexManager.sync().
        Steps:
        1. List all memory files (SOUL.md, USER.md, MEMORY.md, memory/*)
        2. For each file, check if content hash has changed
        3. If changed: chunk → embed → store in DB
        4. Remove stale entries for deleted files
        5. Optionally sync session logs from daily/
        Returns a summary dict with counts.

        Args:
            force: Re-index every file regardless of content hash and
                bypass the in-progress early-exit below.
        """
        # Re-entrancy guard: skip when a sync is already running, unless
        # forced (a forced call still serializes on the lock below).
        if self._syncing and not force:
            logger.debug("Sync already in progress, skipping")
            return {"skipped": True}
        # NOTE(review): a threading.Lock held inside an async method
        # blocks the event loop for the duration of the sync — confirm
        # this is acceptable for large workspaces.
        with self._sync_lock:
            self._syncing = True
            try:
                return self._run_sync(force=force)
            finally:
                # Clear both flags even if _run_sync raised.
                self._syncing = False
                self._dirty = False
    def _run_sync(self, *, force: bool = False) -> dict:
        """
        Execute the actual sync logic.

        Indexes new/changed memory files (hash comparison), removes
        records for deleted files, optionally indexes session logs, then
        commits the whole pass as one transaction.

        Returns:
            A stats dict; note "chunks_created" reports the TOTAL chunk
            count in the index after the pass, not just new chunks.
        """
        stats = {
            "files_found": 0,
            "files_indexed": 0,
            "files_skipped": 0,
            "chunks_created": 0,
            "stale_removed": 0,
            "sessions_indexed": 0,
        }
        # ── Memory files ──
        if "memory" in self._config.sources:
            files = list_memory_files(self._workspace_dir)
            stats["files_found"] = len(files)
            active_paths: set[str] = set()
            for abs_path in files:
                entry = build_file_entry(abs_path, self._workspace_dir)
                active_paths.add(entry.path)
                # Check if file has changed (content-hash comparison)
                row = self._db.execute(
                    "SELECT hash FROM files WHERE path = ? AND source = ?",
                    (entry.path, MemorySource.MEMORY.value),
                ).fetchone()
                if not force and row and row["hash"] == entry.hash:
                    stats["files_skipped"] += 1
                    continue
                # File is new or changed — re-index it
                self._index_file(entry, MemorySource.MEMORY)
                stats["files_indexed"] += 1
            # Remove stale entries for deleted files
            stale_rows = self._db.execute(
                "SELECT path FROM files WHERE source = ?",
                (MemorySource.MEMORY.value,),
            ).fetchall()
            for stale in stale_rows:
                if stale["path"] not in active_paths:
                    self._remove_file(stale["path"], MemorySource.MEMORY)
                    stats["stale_removed"] += 1
        # ── Session files ──
        if "sessions" in self._config.sources:
            session_count = self._sync_session_files(force=force)
            stats["sessions_indexed"] = session_count
        # Count total chunks (post-sync index total, see docstring)
        row = self._db.execute("SELECT COUNT(*) as c FROM chunks").fetchone()
        stats["chunks_created"] = row["c"] if row else 0
        # Single commit covers every insert/delete made above.
        self._db.commit()
        logger.info(
            f"Sync complete: {stats['files_indexed']} indexed, "
            f"{stats['files_skipped']} unchanged, "
            f"{stats['stale_removed']} removed, "
            f"{stats['chunks_created']} total chunks"
        )
        return stats
    def _index_file(self, entry: "MemoryFileEntry", source: MemorySource) -> None:
        """
        Index a single file: read → chunk → embed → store.
        Port of OpenClaw's indexFile method.

        Embeddings are looked up in the embedding_cache table by chunk
        hash first; only uncached chunks are sent to the embedder. All
        existing chunks for the file are removed before the new ones are
        inserted. No commit happens here — _run_sync commits the pass.
        """
        try:
            with open(entry.abs_path, "r", encoding="utf-8") as f:
                content = f.read()
        except Exception as e:
            logger.warning(f"Failed to read {entry.abs_path}: {e}")
            return
        # Empty/whitespace-only files produce no chunks.
        if not content.strip():
            return
        # Chunk the content
        chunks = chunk_markdown(
            content,
            chunk_tokens=self._config.chunk_tokens,
            chunk_overlap=self._config.chunk_overlap,
        )
        if not chunks:
            return
        # Check embedding cache and compute new embeddings
        texts_to_embed = []
        chunk_hashes = []
        cached_embeddings: dict[str, list[float]] = {}
        for chunk in chunks:
            # Check cache first (keyed by model + chunk content hash)
            cache_row = self._db.execute(
                "SELECT embedding FROM embedding_cache WHERE model = ? AND hash = ?",
                (self._config.embedding_model, chunk.hash),
            ).fetchone()
            if cache_row:
                cached_embeddings[chunk.hash] = json.loads(cache_row["embedding"])
            else:
                texts_to_embed.append(chunk.text)
                chunk_hashes.append(chunk.hash)
        # Batch embed uncached chunks
        new_embeddings: dict[str, list[float]] = {}
        if texts_to_embed:
            try:
                vectors = embed_batch(texts_to_embed, self._config.embedding_model)
                now = int(time.time())
                for i, chunk_hash in enumerate(chunk_hashes):
                    # Defensive: tolerate a short vectors list.
                    vec = vectors[i] if i < len(vectors) else []
                    new_embeddings[chunk_hash] = vec
                    # Store in cache
                    self._db.execute(
                        "INSERT OR REPLACE INTO embedding_cache "
                        "(model, hash, embedding, dims, updated_at) "
                        "VALUES (?, ?, ?, ?, ?)",
                        (
                            self._config.embedding_model,
                            chunk_hash,
                            json.dumps(vec),
                            len(vec),
                            now,
                        ),
                    )
            except Exception as e:
                logger.warning(f"Embedding batch failed for {entry.path}: {e}")
                # Fall back to empty embeddings — chunks stay in the index
                # and remain reachable via keyword (FTS) search.
                for chunk_hash in chunk_hashes:
                    new_embeddings[chunk_hash] = []
        # Remove old chunks for this file before re-inserting.
        self._remove_file_chunks(entry.path, source)
        # Insert new chunks
        now = int(time.time())
        for chunk in chunks:
            chunk_id = str(uuid.uuid4())
            embedding = cached_embeddings.get(chunk.hash) or new_embeddings.get(
                chunk.hash, []
            )
            self._db.execute(
                "INSERT INTO chunks "
                "(id, path, source, start_line, end_line, hash, model, text, embedding, updated_at) "
                "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                (
                    chunk_id,
                    entry.path,
                    source.value,
                    chunk.start_line,
                    chunk.end_line,
                    chunk.hash,
                    self._config.embedding_model,
                    chunk.text,
                    json.dumps(embedding),
                    now,
                ),
            )
            # Insert into FTS index (best-effort; chunk stays usable for
            # vector search even if the FTS insert fails)
            if self._fts_available:
                try:
                    self._db.execute(
                        "INSERT INTO chunks_fts "
                        "(text, id, path, source, model, start_line, end_line) "
                        "VALUES (?, ?, ?, ?, ?, ?, ?)",
                        (
                            chunk.text,
                            chunk_id,
                            entry.path,
                            source.value,
                            self._config.embedding_model,
                            chunk.start_line,
                            chunk.end_line,
                        ),
                    )
                except Exception as e:
                    logger.debug(f"FTS insert failed for chunk {chunk_id}: {e}")
        # Update files table with the file's current hash/mtime/size
        self._db.execute(
            "INSERT OR REPLACE INTO files (path, source, hash, mtime, size) "
            "VALUES (?, ?, ?, ?, ?)",
            (
                entry.path,
                source.value,
                entry.hash,
                int(entry.mtime_ms),
                entry.size,
            ),
        )
def _remove_file_chunks(self, path: str, source: MemorySource) -> None:
"""Remove all chunks (and FTS entries) for a given file."""
# Get chunk IDs for FTS cleanup
if self._fts_available:
chunk_ids = self._db.execute(
"SELECT id FROM chunks WHERE path = ? AND source = ?",
(path, source.value),
).fetchall()
for row in chunk_ids:
try:
self._db.execute(
"DELETE FROM chunks_fts WHERE id = ?", (row["id"],)
)
except Exception:
pass
self._db.execute(
"DELETE FROM chunks WHERE path = ? AND source = ?",
(path, source.value),
)
def _remove_file(self, path: str, source: MemorySource) -> None:
"""Remove a file and all its chunks from the index."""
self._remove_file_chunks(path, source)
self._db.execute(
"DELETE FROM files WHERE path = ? AND source = ?",
(path, source.value),
)
# ── Session Logs ─────────────────────────────────────────────────
def _sync_session_files(self, *, force: bool = False) -> int:
"""
Sync session log files from the daily/ directory.
Returns the number of session files indexed.
"""
sessions_dir = Path(self._sessions_dir)
if not sessions_dir.is_dir():
return 0
indexed = 0
active_paths: set[str] = set()
for md_file in sorted(sessions_dir.glob("*.md")):
if md_file.is_symlink() or not md_file.is_file():
continue
entry = build_file_entry(str(md_file), self._workspace_dir)
active_paths.add(entry.path)
# Check if changed
row = self._db.execute(
"SELECT hash FROM files WHERE path = ? AND source = ?",
(entry.path, MemorySource.SESSIONS.value),
).fetchone()
if not force and row and row["hash"] == entry.hash:
continue
self._index_file(entry, MemorySource.SESSIONS)
indexed += 1
# Clean stale session entries
stale_rows = self._db.execute(
"SELECT path FROM files WHERE source = ?",
(MemorySource.SESSIONS.value,),
).fetchall()
for stale in stale_rows:
if stale["path"] not in active_paths:
self._remove_file(stale["path"], MemorySource.SESSIONS)
return indexed
def log_session(
self,
content: str,
*,
date: str | None = None,
channel: str = "slack",
) -> str:
"""
Append to today's session log in daily/.
Args:
content: The text to log (e.g., a user message or AI response).
date: Optional date string (YYYY-MM-DD). Defaults to today.
channel: Channel the conversation came from.
Returns:
Path to the session log file.
"""
if date is None:
date = time.strftime("%Y-%m-%d")
log_path = os.path.join(self._sessions_dir, f"{date}.md")
# Create file with header if it doesn't exist
if not os.path.exists(log_path):
header = f"# Session Log — {date}\n\n"
with open(log_path, "w", encoding="utf-8") as f:
f.write(header)
# Append the content
timestamp = time.strftime("%H:%M:%S")
with open(log_path, "a", encoding="utf-8") as f:
f.write(f"\n---\n\n**[{timestamp}] ({channel})**\n\n{content}\n")
# Mark as dirty for next sync
self._dirty = True
return log_path
# ── Identity File Access ─────────────────────────────────────────
def read_identity_file(self, name: str) -> str | None:
"""Read an identity file (SOUL.md, USER.md, MEMORY.md)."""
filepath = os.path.join(self._workspace_dir, name)
if not os.path.isfile(filepath):
return None
with open(filepath, "r", encoding="utf-8") as f:
return f.read()
def update_identity_file(self, name: str, content: str) -> None:
"""Update an identity file and mark index as dirty."""
filepath = os.path.join(self._workspace_dir, name)
with open(filepath, "w", encoding="utf-8") as f:
f.write(content)
self._dirty = True
logger.info(f"Updated identity file: {name}")
    def read_soul(self) -> str | None:
        """Contents of SOUL.md (personality & values), or None if missing."""
        return self.read_identity_file("SOUL.md")
    def read_user(self) -> str | None:
        """Contents of USER.md (who the user is), or None if missing."""
        return self.read_identity_file("USER.md")
    def read_long_term_memory(self) -> str | None:
        """Contents of MEMORY.md (persisted decisions/lessons), or None if missing."""
        return self.read_identity_file("MEMORY.md")
def append_to_memory(self, entry: str) -> None:
"""Append a new entry to MEMORY.md."""
filepath = os.path.join(self._workspace_dir, "MEMORY.md")
timestamp = time.strftime("%Y-%m-%d %H:%M")
with open(filepath, "a", encoding="utf-8") as f:
f.write(f"\n### [{timestamp}]\n\n{entry}\n")
self._dirty = True
logger.info("Appended to MEMORY.md")
# ── File Reading ─────────────────────────────────────────────────
def read_file(
self,
rel_path: str,
*,
from_line: int | None = None,
num_lines: int | None = None,
) -> dict:
"""
Read a memory file by relative path.
Port of OpenClaw's readFile().
"""
raw = rel_path.strip()
if not raw:
raise ValueError("path required")
if os.path.isabs(raw):
abs_path = os.path.realpath(raw)
else:
abs_path = os.path.realpath(
os.path.join(self._workspace_dir, raw)
)
if not abs_path.endswith(".md"):
raise ValueError("Only .md files are supported")
if not os.path.isfile(abs_path):
raise FileNotFoundError(f"File not found: {abs_path}")
with open(abs_path, "r", encoding="utf-8") as f:
content = f.read()
if from_line is None and num_lines is None:
return {"text": content, "path": rel_path}
lines = content.split("\n")
start = max(1, from_line or 1)
count = max(1, num_lines or len(lines))
sliced = lines[start - 1 : start - 1 + count]
return {"text": "\n".join(sliced), "path": rel_path}
# ── Status ───────────────────────────────────────────────────────
def status(self) -> dict:
"""Get the current status of the memory index."""
files_row = self._db.execute(
"SELECT COUNT(*) as c FROM files"
).fetchone()
chunks_row = self._db.execute(
"SELECT COUNT(*) as c FROM chunks"
).fetchone()
cache_row = self._db.execute(
"SELECT COUNT(*) as c FROM embedding_cache"
).fetchone()
return {
"workspace_dir": self._workspace_dir,
"db_path": self._db_path,
"sessions_dir": self._sessions_dir,
"files": files_row["c"] if files_row else 0,
"chunks": chunks_row["c"] if chunks_row else 0,
"cached_embeddings": cache_row["c"] if cache_row else 0,
"fts_available": self._fts_available,
"dirty": self._dirty,
"embedding_model": self._config.embedding_model,
"embedding_dims": get_embedding_dims(self._config.embedding_model),
"vector_weight": self._config.vector_weight,
"text_weight": self._config.text_weight,
}
# ── File Watching ────────────────────────────────────────────────
def start_watching(self) -> None:
"""
Start watching the workspace for file changes.
Uses watchdog for cross-platform file system events.
"""
if self._watcher or not self._config.watch:
return
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
except ImportError:
logger.warning(
"watchdog not installed — file watching disabled. "
"Install with: uv add watchdog"
)
return
manager = self
class MemoryFileHandler(FileSystemEventHandler):
def on_any_event(self, event):
if event.is_directory:
return
src = getattr(event, "src_path", "")
if src.endswith(".md"):
manager._dirty = True
logger.debug(f"Workspace change detected: {src}")
observer = Observer()
handler = MemoryFileHandler()
observer.schedule(handler, self._workspace_dir, recursive=True)
observer.start()
self._watcher = observer
logger.info(f"File watching started: {self._workspace_dir}")
    def stop_watching(self) -> None:
        """Stop the file watcher if one is running (safe to call repeatedly)."""
        if self._watcher:
            # Stop, then join to wait for the observer thread to exit.
            self._watcher.stop()
            self._watcher.join()
            self._watcher = None
            logger.info("File watching stopped")
# ── Lifecycle ────────────────────────────────────────────────────
    def close(self) -> None:
        """Close the memory manager and release resources (idempotent)."""
        if self._closed:
            return
        # Set the flag first so re-entrant/duplicate calls bail out above.
        self._closed = True
        self.stop_watching()
        self._db.close()
        logger.info("MemoryManager closed")
    def __enter__(self):
        """Support ``with MemoryManager(...) as mgr:`` — returns self."""
        return self
    def __exit__(self, *args):
        """Close on context exit; returns None so exceptions propagate."""
        self.close()
def __del__(self):
if not self._closed:
try:
self.close()
except Exception:
pass

124
memory/schema.py Normal file
View File

@@ -0,0 +1,124 @@
"""
SQLite schema for the memory system.
Port of OpenClaw's src/memory/memory-schema.ts.
Tables:
• meta — key-value store for index metadata
• files — tracked files with content hashes (for incremental sync)
• chunks — text chunks with embeddings
• chunks_fts — FTS5 virtual table for keyword/BM25 search
• chunks_vec — sqlite-vec virtual table for vector similarity (optional)
"""
import logging
import sqlite3
logger = logging.getLogger("aetheel.memory.schema")
def ensure_schema(
    db: sqlite3.Connection,
    *,
    fts_enabled: bool = True,
) -> dict:
    """
    Create all required tables and indices if they don't exist (idempotent).

    Args:
        db: Open SQLite connection to the memory database.
        fts_enabled: When True, attempt to create the FTS5 virtual table.

    Returns:
        {"fts_available": bool}; includes "fts_error" when FTS5 setup failed.
    """
    # Core relational tables and indices — every statement is guarded with
    # IF NOT EXISTS, so this is safe to run on each startup.
    statements = (
        # meta: key-value store for index metadata (model, dimensions, ...)
        """
        CREATE TABLE IF NOT EXISTS meta (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL
        )
        """,
        # files: indexed files keyed by (path, source) with their content hash
        """
        CREATE TABLE IF NOT EXISTS files (
            path TEXT NOT NULL,
            source TEXT NOT NULL DEFAULT 'memory',
            hash TEXT NOT NULL,
            mtime INTEGER NOT NULL,
            size INTEGER NOT NULL,
            PRIMARY KEY (path, source)
        )
        """,
        # chunks: text chunks plus their (JSON-encoded) embeddings
        """
        CREATE TABLE IF NOT EXISTS chunks (
            id TEXT PRIMARY KEY,
            path TEXT NOT NULL,
            source TEXT NOT NULL DEFAULT 'memory',
            start_line INTEGER NOT NULL,
            end_line INTEGER NOT NULL,
            hash TEXT NOT NULL,
            model TEXT NOT NULL,
            text TEXT NOT NULL,
            embedding TEXT NOT NULL,
            updated_at INTEGER NOT NULL
        )
        """,
        "CREATE INDEX IF NOT EXISTS idx_chunks_path ON chunks(path)",
        "CREATE INDEX IF NOT EXISTS idx_chunks_source ON chunks(source)",
        "CREATE INDEX IF NOT EXISTS idx_chunks_hash ON chunks(hash)",
        # embedding_cache: avoids re-computing embeddings for unchanged text
        """
        CREATE TABLE IF NOT EXISTS embedding_cache (
            model TEXT NOT NULL,
            hash TEXT NOT NULL,
            embedding TEXT NOT NULL,
            dims INTEGER,
            updated_at INTEGER NOT NULL,
            PRIMARY KEY (model, hash)
        )
        """,
        "CREATE INDEX IF NOT EXISTS idx_embedding_cache_updated_at "
        "ON embedding_cache(updated_at)",
        # session_logs: one row per (day, channel) of conversation transcript
        """
        CREATE TABLE IF NOT EXISTS session_logs (
            session_date TEXT NOT NULL,
            channel TEXT NOT NULL DEFAULT 'slack',
            user_id TEXT,
            summary TEXT,
            raw_transcript TEXT,
            created_at INTEGER NOT NULL,
            PRIMARY KEY (session_date, channel)
        )
        """,
    )
    for stmt in statements:
        db.execute(stmt)

    # FTS5 is optional — some SQLite builds ship without the extension,
    # so failure degrades to keyword-search-unavailable instead of crashing.
    fts_available = False
    fts_error: str | None = None
    if fts_enabled:
        try:
            db.execute("""
                CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
                    text,
                    id UNINDEXED,
                    path UNINDEXED,
                    source UNINDEXED,
                    model UNINDEXED,
                    start_line UNINDEXED,
                    end_line UNINDEXED
                )
            """)
            fts_available = True
        except Exception as e:
            fts_error = str(e)
            logger.warning(f"FTS5 unavailable: {fts_error}")

    db.commit()

    result: dict = {"fts_available": fts_available}
    if fts_error:
        result["fts_error"] = fts_error
    return result

104
memory/types.py Normal file
View File

@@ -0,0 +1,104 @@
"""
Memory system types — mirrors OpenClaw's src/memory/types.ts.
"""
from dataclasses import dataclass, field
from enum import Enum
class MemorySource(str, Enum):
    """Source of a memory entry — either workspace markdown or session logs."""

    MEMORY = "memory"      # workspace markdown (SOUL.md, USER.md, MEMORY.md, ...)
    SESSIONS = "sessions"  # daily session transcript logs
@dataclass
class MemorySearchResult:
    """
    A single search result from the memory system.
    Mirrors OpenClaw's MemorySearchResult type.
    """

    path: str               # relative path of the matching file
    start_line: int         # 1-based first line of the matching chunk
    end_line: int           # 1-based last line of the matching chunk
    score: float            # relevance score (higher is better)
    snippet: str            # excerpt of the matching text
    source: MemorySource    # which corpus the hit came from
    citation: str | None = None  # optional human-readable citation string
@dataclass
class MemoryChunk:
    """
    A chunk of text extracted from a markdown file.
    Mirrors OpenClaw's MemoryChunk from internal.ts.
    """

    start_line: int  # 1-based first line of the chunk in the source file
    end_line: int    # 1-based last line of the chunk
    text: str        # the chunk's text content
    hash: str        # content hash, used as the embedding-cache key
@dataclass
class MemoryFileEntry:
    """
    Metadata about an indexed markdown file.
    Mirrors OpenClaw's MemoryFileEntry from internal.ts.
    Used by the sync pass to decide whether a file changed since last index.
    """

    path: str        # relative path within workspace
    abs_path: str    # absolute filesystem path
    mtime_ms: float  # modification time (ms since epoch)
    size: int        # file size in bytes
    hash: str        # SHA-256 of file content
@dataclass
class SessionFileEntry:
    """
    Metadata about an indexed session transcript file.
    Mirrors OpenClaw's SessionFileEntry from session-files.ts.
    """

    path: str        # relative path (sessions/<filename>)
    abs_path: str    # absolute filesystem path
    mtime_ms: float  # modification time (ms since epoch)
    size: int        # file size in bytes
    hash: str        # content hash for change detection
    content: str     # extracted text content
    line_map: list[int] = field(default_factory=list)  # line-offset map for citations
@dataclass
class MemoryConfig:
    """
    Configuration for the memory system.
    """

    # Workspace directory containing SOUL.md, USER.md, MEMORY.md, etc.
    workspace_dir: str = "~/.aetheel/workspace"
    # SQLite database path (created automatically)
    db_path: str = "~/.aetheel/memory.db"
    # Chunking — target tokens per chunk and overlap between adjacent chunks
    chunk_tokens: int = 512
    chunk_overlap: int = 50
    # Search — hybrid scoring blends vector and keyword (BM25) components;
    # defaults follow the 0.7/0.3 weighting used throughout the project.
    max_results: int = 10
    min_score: float = 0.1
    vector_weight: float = 0.7
    text_weight: float = 0.3
    # Embedding — local model identifier and its output dimensionality
    embedding_model: str = "BAAI/bge-small-en-v1.5"
    embedding_dims: int = 384
    # Sync — watch the workspace for changes; debounce rapid edit bursts
    watch: bool = True
    watch_debounce_ms: int = 2000
    sync_on_search: bool = True
    # Session logs
    sessions_dir: str | None = None  # defaults to workspace_dir/daily/
    # Sources to index
    sources: list[str] = field(default_factory=lambda: ["memory", "sessions"])

414
openclaw-analysis.md Normal file
View File

@@ -0,0 +1,414 @@
# OpenClaw Analysis & "My Own OpenClaw" Comparison
> **Date:** 2026-02-13
> **Source Repo:** `inspiration/openclaw/` (local clone)
> **Diagram Reference:** `inspiration/MyOwnOpenClaw.png`
---
## Table of Contents
1. [What Is OpenClaw?](#what-is-openclaw)
2. [OpenClaw Architecture Deep Dive](#openclaw-architecture-deep-dive)
3. [MyOwnOpenClaw — The Simplified Blueprint](#myownopenclaw--the-simplified-blueprint)
4. [Side-by-Side Comparison](#side-by-side-comparison)
5. [Key Takeaways for Building Our Own](#key-takeaways-for-building-our-own)
6. [Recommended Build Process for Aetheel](#recommended-build-process-for-aetheel)
---
## 1. What Is OpenClaw?
OpenClaw is an **open-source personal AI assistant** (MIT licensed, 176k+ stars, 443 contributors, 175k+ lines of TypeScript). It runs locally on your own devices and acts as a **gateway-centric control plane** that connects an AI agent to every messaging channel you already use.
**Core value proposition:** A single, always-on AI assistant that talks to you through WhatsApp, Telegram, Slack, Discord, Signal, iMessage, Microsoft Teams, Google Chat, Matrix, WebChat, and more — while keeping everything local and under your control.
---
## 2. OpenClaw Architecture Deep Dive
### 2.1 The Four Pillars
Based on both the source code analysis and the `MyOwnOpenClaw.png` diagram, OpenClaw's architecture rests on **four core subsystems**:
---
### Pillar 1: Memory System — "How It Remembers You"
**Source files:** `src/memory/` (49 files, including `manager.ts` at 2,300+ lines)
**How it works:**
| Component | Details |
|-----------|---------|
| **Identity Files** | `SOUL.md` — personality & values; `USER.md` — who you are; `AGENTS.md` — agent behavior rules; `HEARTBEAT.md` — what to proactively check |
| **Long-term Memory** | `MEMORY.md` — persisted decisions, lessons, context |
| **Session Logs** | `daily/` — session logs organized by date |
| **Search** | **Hybrid search** = vector (embeddings) + keyword (BM25) via `sqlite-vec` or `pgvector` |
| **Embedding Providers** | OpenAI, Voyage AI, Gemini, or local via `node-llama-cpp` (ONNX) |
| **Storage** | SQLite database with `sqlite-vec` extension for vector similarity |
| **Sync** | File watcher (chokidar) monitors workspace for changes, auto-re-indexes |
**Key architectural details from the code:**
- `MemoryIndexManager` class (2,300 LOC) manages the full lifecycle: sync → chunk → embed → store → search
- Hybrid search weighting: configurable vector weight + keyword weight (default 0.7 × vector + 0.3 × keyword as shown in the diagram)
- Supports batch embedding with Voyage, OpenAI, and Gemini batch APIs
- FTS5 full-text search table for keyword matching
- Vector table via `sqlite-vec` for similarity search
- Automatic chunking with configurable token sizes and overlap
---
### Pillar 2: Heartbeat — "How It Acts Proactively"
**Source files:** `src/cron/` (37 files including service, scheduling, delivery)
**How it works:**
| Component | Details |
|-----------|---------|
| **Scheduling** | Cron-based scheduling using the `croner` library |
| **Service Architecture** | `src/cron/service/` — manages job lifecycle, timers, catch-up after restarts |
| **Normalization** | `normalize.ts` (13k) — normalizes cron expressions and job definitions |
| **Delivery** | `delivery.ts` — routes cron job output to the correct channel/session |
| **Run Logging** | `run-log.ts` — persists execution history |
| **Session Reaper** | `session-reaper.ts` — cleans up stale sessions |
**What happens on each heartbeat:**
1. Cron fires at scheduled intervals
2. Gateway processes the event
3. Checks all integrated services (Gmail, Calendar, Asana, Slack, etc.)
4. AI reasons over the data
5. Sends notification if needed (e.g., "Meeting in 15 min — prep doc is empty")
6. Or returns `HEARTBEAT_OK` (nothing to report)
**Key detail:** Runs **without user prompting** — this is what makes it feel "proactive."
---
### Pillar 3: Channel Adapters — "How It Works Everywhere"
**Source files:** `src/channels/`, `src/whatsapp/`, `src/telegram/`, `src/discord/`, `src/slack/`, `src/signal/`, `src/imessage/`, `src/web/`, plus `extensions/` (35 extension directories)
**Built-in channels:**
| Channel | Library | Status |
|---------|---------|--------|
| WhatsApp | `@whiskeysockets/baileys` | Core |
| Telegram | `grammy` | Core |
| Slack | `@slack/bolt` | Core |
| Discord | `discord.js` / `@buape/carbon` | Core |
| Signal | `signal-cli` | Core |
| iMessage | BlueBubbles (recommended) or legacy `imsg` | Core |
| WebChat | Built into Gateway WS | Core |
**Extension channels** (via plugin system):
Microsoft Teams, Matrix, Zalo, Zalo Personal, Google Chat, IRC, Mattermost, Twitch, LINE, Feishu, Nextcloud Talk, Nostr, Tlon, voice calls
**Architecture:**
- **Gateway-centric** — all channels connect through a single WebSocket control plane (`ws://127.0.0.1:18789`)
- **Channel Dock** (`src/channels/dock.ts`, 17k) — unified registration and lifecycle management
- **Session isolation** — each channel/conversation gets its own session with isolated context
- **Group routing** — configurable mention gating, reply tags, per-channel chunking
- **DM security** — pairing codes for unknown senders, allowlists
---
### Pillar 4: Skills Registry — "How It Extends to Anything"
**Source files:** `skills/` (52 skill directories)
**How it works:**
| Component | Details |
|-----------|---------|
| **Structure** | Each skill is a directory with a `SKILL.md` file |
| **Installation** | Drop a file in `~/.openclaw/workspace/skills/<skill>/SKILL.md` — instantly available |
| **Registry** | ClawHub (5,700+ skills) — community-built extensions |
| **Types** | Bundled, managed, and workspace skills |
| **Scope** | Local files only — no public registry dependency, no supply chain attack surface |
**Built-in skill examples:**
`1password`, `apple-notes`, `apple-reminders`, `bear-notes`, `github`, `notion`, `obsidian`, `spotify-player`, `weather`, `canvas`, `coding-agent`, `discord`, `slack`, `openai-image-gen`, `openai-whisper`, `session-logs`, `summarize`, `video-frames`, `voice-call`, etc.
---
### 2.2 Gateway Architecture
The Gateway is the **central nervous system** of OpenClaw:
```
WhatsApp / Telegram / Slack / Discord / Signal / iMessage / Teams / WebChat
┌───────────────────────────────┐
│ Gateway │
│ (WS control plane) │
│ ws://127.0.0.1:18789 │
├───────────────────────────────┤
│ • Session management │
│ • Channel routing │
│ • Cron/heartbeat engine │
│ • Tool registration │
│ • Presence & typing │
│ • Auth & pairing │
│ • Plugin loading │
│ • Memory manager │
│ • Config hot-reload │
└──────────────┬────────────────┘
├─ Pi agent (RPC) — AI reasoning engine
├─ CLI (openclaw …)
├─ WebChat UI
├─ macOS app (menu bar)
├─ iOS / Android nodes
└─ Browser control (CDP)
```
**Key source files:**
- `src/gateway/server.impl.ts` (22k) — main gateway server implementation
- `src/gateway/server-http.ts` (17k) — HTTP server
- `src/gateway/ws-log.ts` (14k) — WebSocket logging
- `src/gateway/session-utils.ts` (22k) — session management
- `src/gateway/config-reload.ts` (11k) — hot config reload
### 2.3 Configuration
- **File:** `~/.openclaw/openclaw.json` (JSON5 format)
- **Schema:** Massive TypeBox schema system (`src/config/schema.ts`, `schema.hints.ts` at 46k, `schema.field-metadata.ts` at 45k)
- **Validation:** Zod schemas (`src/config/zod-schema.ts`, 20k)
- **Hot Reload:** Config changes apply without restart
### 2.4 Tech Stack
| Category | Technology |
|----------|-----------|
| **Language** | TypeScript (ESM) |
| **Runtime** | Node.js ≥22 (Bun also supported) |
| **Package Manager** | pnpm (bun optional) |
| **Build** | `tsdown` (based on Rolldown) |
| **Testing** | Vitest with V8 coverage |
| **Linting** | Oxlint + Oxfmt |
| **AI Runtime** | Pi agent (`@mariozechner/pi-agent-core`) in RPC mode |
| **Database** | SQLite with `sqlite-vec` for vector search |
| **Embedding** | OpenAI, Voyage, Gemini, or local ONNX |
| **HTTP** | Express 5 |
| **WebSocket** | `ws` library |
---
## 3. MyOwnOpenClaw — The Simplified Blueprint
The `MyOwnOpenClaw.png` diagram presents a **dramatically simplified** version of the same architecture, built with:
**Tools:** Claude Code + Claude Agent SDK + SQLite + Markdown + Obsidian
### The 4 Custom Modules
#### ① My Memory (SQLite + Markdown + Obsidian)
| Feature | Implementation |
|---------|---------------|
| `SOUL.md` | Personality & values |
| `USER.md` | Who I am, preferences |
| `MEMORY.md` | Decisions & lessons |
| `daily/` | Session logs |
| **Hybrid Search** | 0.7 × vector + 0.3 × keyword (BM25) |
| **Embeddings** | SQLite (or Postgres) + FastEmbed (384-dim, ONNX) |
| **Key principle** | Fully local — zero API calls |
| **Storage philosophy** | "Markdown IS the database" — Obsidian syncs it everywhere |
#### ② My Heartbeat (Claude Agent SDK + Python APIs)
| Feature | Implementation |
|---------|---------------|
| **Frequency** | Every 30 minutes |
| **Action** | Python gathers data from sources: Gmail, Calendar, Asana, Slack |
| **Reasoning** | Claude reasons over the data, decides what's important |
| **Notification** | Sends notification if needed |
| **Example** | "Meeting in 15 min — prep doc is empty" |
| **Fallback** | `HEARTBEAT_OK (nothing to report)` |
#### ③ My Adapters (Slack + Terminal)
| Feature | Implementation |
|---------|---------------|
| **Slack** | Socket Mode — no public URL needed; each thread = persistent conversation |
| **Terminal** | Claude Code — direct interaction; full skill + hook access either way |
| **One-shot** | With Claude Code |
| **Future** | Discord, Teams — add when needed |
#### ④ My Skills (Local `.claude/skills/`)
| Feature | Implementation |
|---------|---------------|
| **Location** | Local `.claude/skills/` directory |
| **Examples** | `content-engine/`, `direct-integrations/`, `yt-script/`, `pptx-generator/`, `excalidraw-diagram/`, `...15+ more` |
| **Installation** | Drop in `SKILL.md` — instantly available |
| **Security** | Local files only — NO public registry, no supply chain attack surface |
### The Vision: "Your Ultra-Personalized AI Agent"
> - 🔵 Remembers your decisions, preferences, and context
> - 🟣 Checks your email and calendar — before you ask
> - 🟢 Talk to it from Slack, terminal, anywhere
> - 🟡 Add any capability with a single file
>
> **"Acts on your behalf. Anticipates what you need. Knows you better every day."**
### Build Stack
```
Claude Code ──→ Claude Agent SDK ──→ SQLite + Markdown ──→ Obsidian
(skills + hooks) (heartbeat + background) (hybrid search, fully local) (your canvas, sync anywhere)
```
**~2,000 lines of Python + Markdown** — "You can build it in just a couple days."
---
## 4. Side-by-Side Comparison
| Feature | OpenClaw (Full) | MyOwnOpenClaw (Custom) |
|---------|----------------|----------------------|
| **Codebase Size** | 175k+ lines TypeScript | ~2,000 lines Python + Markdown |
| **Language** | TypeScript (ESM) | Python |
| **AI Provider** | Any (Anthropic, OpenAI, etc. via Pi) | Claude (via Claude Agent SDK) |
| **Memory System** | SQLite + sqlite-vec, multiple embedding providers | SQLite + FastEmbed (384-dim ONNX) |
| **Hybrid Search** | Vector + BM25 (configurable weights) | 0.7 vector + 0.3 keyword (BM25) |
| **Embeddings** | OpenAI, Voyage, Gemini, local ONNX | FastEmbed local ONNX only — zero API calls |
| **Prompt Files** | SOUL.md, USER.md, AGENTS.md, HEARTBEAT.md, TOOLS.md | SOUL.md, USER.md, MEMORY.md, daily/ |
| **Heartbeat** | Full cron system with croner library | Simple 30-minute Python script |
| **Data Sources** | Configurable via plugins/skills | Gmail, Calendar, Asana, Slack |
| **Channels** | 15+ (WhatsApp, Telegram, Slack, Discord, Signal, iMessage, Teams, Matrix, etc.) | Slack (Socket Mode) + Terminal (Claude Code) |
| **Gateway** | Full WS control plane with auth, routing, sessions | None — direct connection |
| **Skills** | 52 bundled + ClawHub registry (5,700+) | Local `.claude/skills/` directory (15+ custom) |
| **Skill Format** | `SKILL.md` file in directory | `SKILL.md` file in directory (same pattern!) |
| **Apps** | macOS, iOS, Android, WebChat | None — Slack + CLI |
| **Voice** | Voice Wake + Talk Mode (ElevenLabs) | Not included |
| **Browser** | Playwright-based CDP control | Not included |
| **Canvas** | Agent-driven visual workspace (A2UI) | Not included |
| **Config** | JSON5 with massive schema validation | Simple Markdown files |
| **Sync** | File watcher (chokidar) | Obsidian sync |
| **Storage Philosophy** | SQLite is the DB | "Markdown IS the database" — Obsidian syncs everywhere |
| **Installation** | `npm install -g openclaw` + wizard | Clone repo + point Claude Code at it |
| **Security** | DM pairing, allowlists, Docker sandboxing | Local only by default |
| **Multi-agent** | Session isolation, agent-to-agent messaging | Not included |
| **Complexity** | Enterprise-grade, production-ready | Personal, lightweight, hackable |
---
## 5. Key Takeaways for Building Our Own
### What OpenClaw Gets Right (and we should learn from):
1. **The Memory Architecture** — The combination of identity files (`SOUL.md`, `USER.md`) + long-term memory (`MEMORY.md`) + session logs (`daily/`) is the core pattern. Both systems use this.
2. **Hybrid Search** — Vector + keyword search is essential for good memory retrieval. The 0.7/0.3 weighting is a good starting point.
3. **Skill Drop-in Pattern** — Just put a `SKILL.md` file in a directory and it's instantly available. No compilation, no registry. OpenClaw invented this pattern and the custom version copies it directly.
4. **Proactive Heartbeat** — Running on a schedule, checking your data sources before you ask. This is what makes the agent feel like an assistant rather than a chatbot.
5. **The Separation of Concerns** — Memory, Heartbeat, Adapters, and Skills are clean, independent modules. Each can be built and tested separately.
### What MyOwnOpenClaw Simplifies:
1. **No Gateway** — Direct connections instead of a WS control plane. Much simpler but less flexible.
2. **Python over TypeScript** — More accessible for quick prototyping and data processing.
3. **Claude-only** — No model switching, no failover. Simpler but locked to one provider.
4. **Obsidian as sync** — Uses Obsidian's existing sync infrastructure instead of building custom file watching.
5. **Two adapters max** — Slack + Terminal vs. 15+ channels. Start small, add as needed.
### The Process (from the diagram):
> 1. Clone the OpenClaw repository (MIT licensed, 100% open source)
> 2. Point your coding agent at it — "Explain how the memory system works"
> 3. "Now build that into my own system here (optional: with customization XYZ)"
> 4. Repeat for heartbeat, adapters, skills. That's it.
**Use OpenClaw as your blueprint, not your dependency.**
---
## 6. Recommended Build Process for Aetheel
Based on this analysis, here's the recommended order for building a custom AI assistant inspired by OpenClaw:
### Phase 1: Memory System
- Create `SOUL.md`, `USER.md`, `MEMORY.md` files
- Implement SQLite database with `sqlite-vec` or FastEmbed for vector search
- Build hybrid search (vector + BM25 keyword)
- Set up file watching for automatic re-indexing
- Use Obsidian for cross-device sync
### Phase 2: Heartbeat
- Build a Python script using Claude Agent SDK
- Connect to Gmail, Calendar, Asana (start with most-used services)
- Set up 30-minute cron schedule
- Implement notification delivery (start with terminal notifications)
### Phase 3: Adapters
- Start with Terminal (Claude Code) for direct interaction
- Add Slack (Socket Mode) for messaging
- Build conversation threading support
### Phase 4: Skills
- Create `.claude/skills/` directory structure
- Port most-used skills from OpenClaw as inspiration
- Build custom skills specific to your workflow
---
## Appendix: OpenClaw File Structure Reference
```
openclaw/
├── src/ # Core source code (175k+ LOC)
│ ├── memory/ # Memory system (49 files)
│ │ ├── manager.ts # Main memory manager (2,300 LOC)
│ │ ├── hybrid.ts # Hybrid search (vector + keyword)
│ │ ├── embeddings.ts # Embedding provider abstraction
│ │ ├── qmd-manager.ts # Query+doc management (33k)
│ │ └── ...
│ ├── cron/ # Heartbeat/cron system (37 files)
│ │ ├── service/ # Cron service lifecycle
│ │ ├── schedule.ts # Scheduling logic
│ │ ├── delivery.ts # Output delivery
│ │ └── ...
│ ├── channels/ # Channel adapter framework (28 files)
│ │ ├── dock.ts # Unified channel dock (17k)
│ │ ├── registry.ts # Channel registration
│ │ └── ...
│ ├── gateway/ # Gateway WS control plane (129+ files)
│ │ ├── server.impl.ts # Main server (22k)
│ │ ├── server-http.ts # HTTP layer (17k)
│ │ ├── session-utils.ts # Session management (22k)
│ │ └── ...
│ ├── config/ # Configuration system (130+ files)
│ ├── agents/ # Agent runtime
│ ├── browser/ # Browser control (Playwright)
│ └── ...
├── skills/ # Built-in skills (52 directories)
│ ├── obsidian/
│ ├── github/
│ ├── notion/
│ ├── spotify-player/
│ └── ...
├── extensions/ # Extension channels (35 directories)
│ ├── msteams/
│ ├── matrix/
│ ├── voice-call/
│ └── ...
├── apps/ # Companion apps
│ ├── macos/
│ ├── ios/
│ └── android/
├── AGENTS.md # Agent behavior guidelines
├── openclaw.json # Configuration
└── package.json # Dependencies & scripts
```

13
pyproject.toml Normal file
View File

@@ -0,0 +1,13 @@
[project]
name = "aetheel"
version = "0.1.0"
description = "Personal AI assistant with local memory, proactive heartbeat, and Slack/terminal adapters"
readme = "README.md"
requires-python = ">=3.14"
dependencies = [
"fastembed>=0.7.4",
"python-dotenv>=1.2.1,<2.0.0",
"slack-bolt>=1.27.0,<2.0.0",
"slack-sdk>=3.40.0,<4.0.0",
"watchdog>=6.0.0",
]

11
requirements.txt Normal file
View File

@@ -0,0 +1,11 @@
# Aetheel dependencies
# ====================
# Slack adapter
slack-bolt>=1.27.0,<2.0.0
slack-sdk>=3.40.0,<4.0.0
python-dotenv>=1.2.1,<2.0.0
# Memory system (embeddings + workspace file watching — mirrors pyproject.toml)
fastembed>=0.7.4
watchdog>=6.0.0
# OpenCode agent runtime (optional — only needed for SDK mode)
# Install with: pip install opencode-ai
# opencode-ai>=0.1.0

113
test_memory.py Normal file
View File

@@ -0,0 +1,113 @@
"""Quick smoke test for the memory system."""
import asyncio
import os
import shutil
from memory import MemoryManager
from memory.types import MemoryConfig
from memory.internal import chunk_markdown, hash_text, list_memory_files
def test_internals():
    """Smoke-test the pure helpers: content hashing and markdown chunking."""
    print("── Internal utilities ──")
    # Hashing: SHA-256 hex digest is always 64 chars.
    digest = hash_text("hello world")
    assert len(digest) == 64
    print(f"✅ hash_text: {digest[:16]}...")
    # Chunking: a small document must yield at least one chunk.
    sample = "# Title\n\nLine1\nLine2\nLine3\n\n## Section\n\nMore text here"
    pieces = chunk_markdown(sample, chunk_tokens=50, chunk_overlap=10)
    assert len(pieces) >= 1
    print(f"✅ chunk_markdown: {len(pieces)} chunks")
    for piece in pieces:
        print(f"   lines {piece.start_line}-{piece.end_line}: {piece.text[:50]!r}")
    print()
async def test_manager():
    """End-to-end smoke test of MemoryManager: identity files, append, logging, sync, search."""
    print("── MemoryManager ──")

    # Start from a clean slate so a previous run never pollutes this one.
    workspace = "/tmp/aetheel_test_workspace"
    database = "/tmp/aetheel_test_memory.db"
    for path in (workspace, database):
        if not os.path.exists(path):
            continue
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)

    cfg = MemoryConfig(
        workspace_dir=workspace,
        db_path=database,
    )
    mgr = MemoryManager(cfg)
    print(f"✅ Created: workspace={mgr._workspace_dir}")

    # Identity files are expected to exist with non-empty content.
    soul = mgr.read_soul()
    assert soul and len(soul) > 0
    print(f"✅ SOUL.md: {len(soul)} chars")

    user = mgr.read_user()
    assert user and len(user) > 0
    print(f"✅ USER.md: {len(user)} chars")

    memory = mgr.read_long_term_memory()
    assert memory and len(memory) > 0
    print(f"✅ MEMORY.md: {len(memory)} chars")

    # Appending must grow the long-term memory file.
    mgr.append_to_memory("Test entry: Python 3.14 works great!")
    memory2 = mgr.read_long_term_memory()
    assert len(memory2) > len(memory)
    print(f"✅ Appended to MEMORY.md: {len(memory2)} chars")

    # A logged session transcript should land on disk.
    log_path = mgr.log_session(
        "User: Hello!\nAssistant: Hi, how can I help?",
        channel="terminal",
    )
    assert os.path.exists(log_path)
    print(f"✅ Session logged: {log_path}")

    # Index everything (the embedding model downloads on first run).
    print("\n⏳ Syncing (loading embedding model on first run)...")
    stats = await mgr.sync()
    print(f"✅ Sync complete:")
    for key, value in stats.items():
        print(f" {key}: {value}")

    def show_hits(hits):
        # Compact report of the top three search hits.
        print(f"✅ Found {len(hits)} results:")
        for rank, hit in enumerate(hits[:3]):
            print(f" [{rank+1}] score={hit.score:.3f} path={hit.path} lines={hit.start_line}-{hit.end_line}")
            print(f" {hit.snippet[:80]}...")

    print("\n🔍 Searching for 'personality values'...")
    show_hits(await mgr.search("personality values"))

    print("\n🔍 Searching for 'preferences'...")
    show_hits(await mgr.search("preferences"))

    print("\n📊 Status:")
    for key, value in mgr.status().items():
        print(f" {key}: {value}")

    mgr.close()
    print("\n✅ All memory system tests passed!")
def _main() -> None:
    # Run the synchronous helper checks first, then the async manager test.
    test_internals()
    asyncio.run(test_manager())


if __name__ == "__main__":
    _main()

244
test_slack.py Normal file
View File

@@ -0,0 +1,244 @@
#!/usr/bin/env python3
"""
Aetheel Slack Adapter — Integration Test
==========================================
Tests the Slack adapter by:
1. Connecting to Slack via Socket Mode
2. Sending a test message to a specified channel
3. Verifying the bot can send and receive
Usage:
python test_slack.py # Interactive — prompts for channel
python test_slack.py --channel C0123456789 # Send to a specific channel
python test_slack.py --dm U0123456789 # Send a DM to a user
python test_slack.py --send-only # Just send, don't listen
Requirements:
- SLACK_BOT_TOKEN and SLACK_APP_TOKEN set in .env
- Bot must be invited to the target channel
"""
import argparse
import logging
import os
import sys
import time
import threading
from dotenv import load_dotenv
load_dotenv()
from adapters.slack_adapter import SlackAdapter, SlackMessage
# Configure root logging so the adapter's internal loggers are visible while testing.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
# Dedicated logger for this test script.
logger = logging.getLogger("aetheel.test")
# ---------------------------------------------------------------------------
# Test 1: Send a message
# ---------------------------------------------------------------------------
def test_send_message(adapter: SlackAdapter, target: str) -> bool:
    """Send one formatted message to *target* and report whether it was accepted."""
    print("\n" + "=" * 60)
    print(" TEST 1: Send Message")
    print("=" * 60)
    try:
        body = (
            "🧪 *Aetheel Slack Test*\n\n"
            "If you can see this message, the Slack adapter is working!\n\n"
            f"• Bot ID: `{adapter._bot_user_id}`\n"
            f"• Bot Name: `@{adapter._bot_user_name}`\n"
            f"• Timestamp: `{time.strftime('%Y-%m-%d %H:%M:%S')}`\n"
            f"• Mode: Socket Mode\n\n"
            "_Reply to this message to test receiving._"
        )
        sent = adapter.send_message(channel=target, text=body)
        print(f" ✅ Message sent successfully!")
        print(f" Channel: {sent.channel_id}")
        print(f" Message ID: {sent.message_id}")
        return True
    except Exception as exc:
        print(f" ❌ Failed to send: {exc}")
        return False
# ---------------------------------------------------------------------------
# Test 2: Send a threaded reply
# ---------------------------------------------------------------------------
def test_threaded_reply(adapter: SlackAdapter, target: str) -> bool:
    """Send a parent message, then post a reply into its thread."""
    print("\n" + "=" * 60)
    print(" TEST 2: Threaded Reply")
    print("=" * 60)
    try:
        root = adapter.send_message(
            channel=target,
            text="🧵 *Thread Test* — This is the parent message.",
        )
        print(f" ✅ Parent message sent (ts={root.message_id})")
        # Brief pause so the reply lands after the parent is visible.
        time.sleep(1)
        threaded = adapter.send_message(
            channel=target,
            text="↳ This is a threaded reply! Thread isolation is working.",
            thread_ts=root.message_id,
        )
        print(f" ✅ Thread reply sent (ts={threaded.message_id})")
        return True
    except Exception as exc:
        print(f" ❌ Failed: {exc}")
        return False
# ---------------------------------------------------------------------------
# Test 3: Long message chunking
# ---------------------------------------------------------------------------
def test_long_message(adapter: SlackAdapter, target: str) -> bool:
"""Test that long messages are properly chunked."""
print("\n" + "=" * 60)
print(" TEST 3: Long Message Chunking")
print("=" * 60)
try:
# Create a message that exceeds 4000 chars
long_text = "📜 *Long Message Test*\n\n"
for i in range(1, 101):
long_text += f"{i}. This is line number {i} of the long message test. " \
f"It contains enough text to test the chunking behavior.\n"
result = adapter.send_message(channel=target, text=long_text)
print(f" ✅ Long message sent (length={len(long_text)}, id={result.message_id})")
return True
except Exception as e:
print(f" ❌ Failed: {e}")
return False
# ---------------------------------------------------------------------------
# Test 4: Receive messages (interactive)
# ---------------------------------------------------------------------------
def test_receive_messages(adapter: SlackAdapter, duration: int = 30) -> bool:
    """
    Test receiving messages by listening for a specified duration.
    The bot will echo back any messages it receives.
    """
    print("\n" + "=" * 60)
    print(" TEST 4: Receive Messages (Interactive)")
    print("=" * 60)
    print(f" Listening for {duration} seconds...")
    print(f" Send a message to @{adapter._bot_user_name} to test receiving.")
    print(f" Press Ctrl+C to stop early.\n")

    inbox = []

    def echo_handler(msg: SlackMessage) -> str:
        # Record the incoming message, then echo it so the sender gets feedback.
        inbox.append(msg)
        print(f" 📨 Received: '{msg.text}' from @{msg.user_name}")
        return f"✅ Got it! You said: _{msg.text}_"

    adapter.on_message(echo_handler)
    try:
        adapter.start_async()
        time.sleep(duration)
    except KeyboardInterrupt:
        print("\n Stopped by user.")
    finally:
        adapter.stop()

    print(f"\n Messages received: {len(inbox)}")
    if not inbox:
        # Nobody messaging the bot is not treated as a failure.
        print(" ⚠️ No messages received (send a message to the bot to test)")
        return True
    print(" ✅ Receive test PASSED")
    return True
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
    """Parse CLI arguments, run the selected Slack adapter tests, print a summary.

    Returns:
        Process exit code: 0 when every executed test passed, 1 otherwise.
        (With no tests executed, the summary is empty and the code is 0.)
    """
    parser = argparse.ArgumentParser(description="Test the Aetheel Slack Adapter")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--channel", help="Channel ID to send test messages to (C...)")
    group.add_argument("--dm", help="User ID to DM for testing (U...)")
    parser.add_argument(
        "--send-only",
        action="store_true",
        help="Only run send tests (don't listen for messages)",
    )
    parser.add_argument(
        "--duration",
        type=int,
        default=30,
        help="How long to listen for messages in seconds (default: 30)",
    )
    args = parser.parse_args()

    # Both tokens are required for Socket Mode; fail fast with setup guidance.
    if not os.environ.get("SLACK_BOT_TOKEN") or not os.environ.get("SLACK_APP_TOKEN"):
        print("❌ Missing SLACK_BOT_TOKEN or SLACK_APP_TOKEN in environment.")
        print(" Copy .env.example to .env and fill in your tokens.")
        sys.exit(1)

    # Resolve the send target: CLI flag first, interactive prompt as fallback.
    target = args.channel or args.dm
    if not target:
        print("You need to specify a target for send tests.")
        print(" --channel C0123456789 (channel ID)")
        print(" --dm U0123456789 (user ID for DM)")
        target = input("\nEnter a channel or user ID (or press Enter to skip send tests): ").strip()

    adapter = SlackAdapter(log_level="INFO")
    # Resolve the bot's own user ID/name up front so the tests can print them.
    adapter._resolve_identity()

    results = {}
    if target:
        results["send"] = test_send_message(adapter, target)
        results["thread"] = test_threaded_reply(adapter, target)
        results["chunking"] = test_long_message(adapter, target)
    else:
        print("\n⏭️ Skipping send tests (no target specified)")
    if not args.send_only:
        results["receive"] = test_receive_messages(adapter, duration=args.duration)

    # Summary. Bug fix: the per-test icon was empty in both branches
    # (`"" if passed else ""`); use the same ✅/❌ markers as the tests above.
    print("\n" + "=" * 60)
    print(" TEST RESULTS")
    print("=" * 60)
    for test_name, passed in results.items():
        icon = "✅" if passed else "❌"
        print(f" {icon} {test_name}")
    total = len(results)
    # Renamed from `passed` to avoid shadowing the loop variable above.
    passed_count = sum(1 for v in results.values() if v)
    print(f"\n {passed_count}/{total} tests passed")
    print("=" * 60)
    return 0 if all(results.values()) else 1


if __name__ == "__main__":
    sys.exit(main())

655
uv.lock generated Normal file
View File

@@ -0,0 +1,655 @@
version = 1
revision = 3
requires-python = ">=3.14"
[[package]]
name = "aetheel"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
{ name = "fastembed" },
{ name = "python-dotenv" },
{ name = "slack-bolt" },
{ name = "slack-sdk" },
{ name = "watchdog" },
]
[package.metadata]
requires-dist = [
{ name = "fastembed", specifier = ">=0.7.4" },
{ name = "python-dotenv", specifier = ">=1.2.1,<2.0.0" },
{ name = "slack-bolt", specifier = ">=1.27.0,<2.0.0" },
{ name = "slack-sdk", specifier = ">=3.40.0,<4.0.0" },
{ name = "watchdog", specifier = ">=6.0.0" },
]
[[package]]
name = "annotated-doc"
version = "0.0.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" },
]
[[package]]
name = "anyio"
version = "4.12.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "idna" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
]
[[package]]
name = "certifi"
version = "2026.1.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" },
]
[[package]]
name = "charset-normalizer"
version = "3.4.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" },
{ url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" },
{ url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" },
{ url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" },
{ url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" },
{ url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" },
{ url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" },
{ url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" },
{ url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" },
{ url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" },
{ url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" },
{ url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" },
{ url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" },
{ url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" },
{ url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" },
{ url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" },
{ url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
]
[[package]]
name = "click"
version = "8.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
]
[[package]]
name = "colorama"
version = "0.4.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
]
[[package]]
name = "fastembed"
version = "0.7.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "huggingface-hub" },
{ name = "loguru" },
{ name = "mmh3" },
{ name = "numpy" },
{ name = "onnxruntime" },
{ name = "pillow" },
{ name = "py-rust-stemmers" },
{ name = "requests" },
{ name = "tokenizers" },
{ name = "tqdm" },
]
sdist = { url = "https://files.pythonhosted.org/packages/4c/c2/9c708680de1b54480161e0505f9d6d3d8eb47a1dc1a1f7f3c5106ba355d2/fastembed-0.7.4.tar.gz", hash = "sha256:8b8a4ea860ca295002f4754e8f5820a636e1065a9444959e18d5988d7f27093b", size = 68807, upload-time = "2025-12-05T12:08:10.447Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/10/3b/8da01492bc8b69184257d0c951bf0e77aec8ce110f06d8ce16c6ed9084f7/fastembed-0.7.4-py3-none-any.whl", hash = "sha256:79250a775f70bd6addb0e054204df042b5029ecae501e40e5bbd08e75844ad83", size = 108491, upload-time = "2025-12-05T12:08:09.059Z" },
]
[[package]]
name = "filelock"
version = "3.21.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/73/71/74364ff065ca78914d8bd90b312fe78ddc5e11372d38bc9cb7104f887ce1/filelock-3.21.2.tar.gz", hash = "sha256:cfd218cfccf8b947fce7837da312ec3359d10ef2a47c8602edd59e0bacffb708", size = 31486, upload-time = "2026-02-13T01:27:15.223Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/98/73/3a18f1e1276810e81477c431009b55eeccebbd7301d28a350b77aacf3c33/filelock-3.21.2-py3-none-any.whl", hash = "sha256:d6cd4dbef3e1bb63bc16500fc5aa100f16e405bbff3fb4231711851be50c1560", size = 21479, upload-time = "2026-02-13T01:27:13.611Z" },
]
[[package]]
name = "flatbuffers"
version = "25.12.19"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e8/2d/d2a548598be01649e2d46231d151a6c56d10b964d94043a335ae56ea2d92/flatbuffers-25.12.19-py2.py3-none-any.whl", hash = "sha256:7634f50c427838bb021c2d66a3d1168e9d199b0607e6329399f04846d42e20b4", size = 26661, upload-time = "2025-12-19T23:16:13.622Z" },
]
[[package]]
name = "fsspec"
version = "2026.2.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/51/7c/f60c259dcbf4f0c47cc4ddb8f7720d2dcdc8888c8e5ad84c73ea4531cc5b/fsspec-2026.2.0.tar.gz", hash = "sha256:6544e34b16869f5aacd5b90bdf1a71acb37792ea3ddf6125ee69a22a53fb8bff", size = 313441, upload-time = "2026-02-05T21:50:53.743Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e6/ab/fb21f4c939bb440104cc2b396d3be1d9b7a9fd3c6c2a53d98c45b3d7c954/fsspec-2026.2.0-py3-none-any.whl", hash = "sha256:98de475b5cb3bd66bedd5c4679e87b4fdfe1a3bf4d707b151b3c07e58c9a2437", size = 202505, upload-time = "2026-02-05T21:50:51.819Z" },
]
[[package]]
name = "h11"
version = "0.16.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
]
[[package]]
name = "hf-xet"
version = "1.2.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/5e/6e/0f11bacf08a67f7fb5ee09740f2ca54163863b07b70d579356e9222ce5d8/hf_xet-1.2.0.tar.gz", hash = "sha256:a8c27070ca547293b6890c4bf389f713f80e8c478631432962bb7f4bc0bd7d7f", size = 506020, upload-time = "2025-10-24T19:04:32.129Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e2/51/f7e2caae42f80af886db414d4e9885fac959330509089f97cccb339c6b87/hf_xet-1.2.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:10bfab528b968c70e062607f663e21e34e2bba349e8038db546646875495179e", size = 2861861, upload-time = "2025-10-24T19:04:19.01Z" },
{ url = "https://files.pythonhosted.org/packages/6e/1d/a641a88b69994f9371bd347f1dd35e5d1e2e2460a2e350c8d5165fc62005/hf_xet-1.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a212e842647b02eb6a911187dc878e79c4aa0aa397e88dd3b26761676e8c1f8", size = 2717699, upload-time = "2025-10-24T19:04:17.306Z" },
{ url = "https://files.pythonhosted.org/packages/df/e0/e5e9bba7d15f0318955f7ec3f4af13f92e773fbb368c0b8008a5acbcb12f/hf_xet-1.2.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30e06daccb3a7d4c065f34fc26c14c74f4653069bb2b194e7f18f17cbe9939c0", size = 3314885, upload-time = "2025-10-24T19:04:07.642Z" },
{ url = "https://files.pythonhosted.org/packages/21/90/b7fe5ff6f2b7b8cbdf1bd56145f863c90a5807d9758a549bf3d916aa4dec/hf_xet-1.2.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:29c8fc913a529ec0a91867ce3d119ac1aac966e098cf49501800c870328cc090", size = 3221550, upload-time = "2025-10-24T19:04:05.55Z" },
{ url = "https://files.pythonhosted.org/packages/6f/cb/73f276f0a7ce46cc6a6ec7d6c7d61cbfe5f2e107123d9bbd0193c355f106/hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e159cbfcfbb29f920db2c09ed8b660eb894640d284f102ada929b6e3dc410a", size = 3408010, upload-time = "2025-10-24T19:04:28.598Z" },
{ url = "https://files.pythonhosted.org/packages/b8/1e/d642a12caa78171f4be64f7cd9c40e3ca5279d055d0873188a58c0f5fbb9/hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9c91d5ae931510107f148874e9e2de8a16052b6f1b3ca3c1b12f15ccb491390f", size = 3503264, upload-time = "2025-10-24T19:04:30.397Z" },
{ url = "https://files.pythonhosted.org/packages/17/b5/33764714923fa1ff922770f7ed18c2daae034d21ae6e10dbf4347c854154/hf_xet-1.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:210d577732b519ac6ede149d2f2f34049d44e8622bf14eb3d63bbcd2d4b332dc", size = 2901071, upload-time = "2025-10-24T19:04:37.463Z" },
{ url = "https://files.pythonhosted.org/packages/96/2d/22338486473df5923a9ab7107d375dbef9173c338ebef5098ef593d2b560/hf_xet-1.2.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:46740d4ac024a7ca9b22bebf77460ff43332868b661186a8e46c227fdae01848", size = 2866099, upload-time = "2025-10-24T19:04:15.366Z" },
{ url = "https://files.pythonhosted.org/packages/7f/8c/c5becfa53234299bc2210ba314eaaae36c2875e0045809b82e40a9544f0c/hf_xet-1.2.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:27df617a076420d8845bea087f59303da8be17ed7ec0cd7ee3b9b9f579dff0e4", size = 2722178, upload-time = "2025-10-24T19:04:13.695Z" },
{ url = "https://files.pythonhosted.org/packages/9a/92/cf3ab0b652b082e66876d08da57fcc6fa2f0e6c70dfbbafbd470bb73eb47/hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3651fd5bfe0281951b988c0facbe726aa5e347b103a675f49a3fa8144c7968fd", size = 3320214, upload-time = "2025-10-24T19:04:03.596Z" },
{ url = "https://files.pythonhosted.org/packages/46/92/3f7ec4a1b6a65bf45b059b6d4a5d38988f63e193056de2f420137e3c3244/hf_xet-1.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d06fa97c8562fb3ee7a378dd9b51e343bc5bc8190254202c9771029152f5e08c", size = 3229054, upload-time = "2025-10-24T19:04:01.949Z" },
{ url = "https://files.pythonhosted.org/packages/0b/dd/7ac658d54b9fb7999a0ccb07ad863b413cbaf5cf172f48ebcd9497ec7263/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4c1428c9ae73ec0939410ec73023c4f842927f39db09b063b9482dac5a3bb737", size = 3413812, upload-time = "2025-10-24T19:04:24.585Z" },
{ url = "https://files.pythonhosted.org/packages/92/68/89ac4e5b12a9ff6286a12174c8538a5930e2ed662091dd2572bbe0a18c8a/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a55558084c16b09b5ed32ab9ed38421e2d87cf3f1f89815764d1177081b99865", size = 3508920, upload-time = "2025-10-24T19:04:26.927Z" },
{ url = "https://files.pythonhosted.org/packages/cb/44/870d44b30e1dcfb6a65932e3e1506c103a8a5aea9103c337e7a53180322c/hf_xet-1.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:e6584a52253f72c9f52f9e549d5895ca7a471608495c4ecaa6cc73dba2b24d69", size = 2905735, upload-time = "2025-10-24T19:04:35.928Z" },
]
[[package]]
name = "httpcore"
version = "1.0.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "h11" },
]
sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
]
[[package]]
name = "httpx"
version = "0.28.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "certifi" },
{ name = "httpcore" },
{ name = "idna" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
]
[[package]]
name = "huggingface-hub"
version = "1.4.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock" },
{ name = "fsspec" },
{ name = "hf-xet", marker = "platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" },
{ name = "httpx" },
{ name = "packaging" },
{ name = "pyyaml" },
{ name = "shellingham" },
{ name = "tqdm" },
{ name = "typer-slim" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c4/fc/eb9bc06130e8bbda6a616e1b80a7aa127681c448d6b49806f61db2670b61/huggingface_hub-1.4.1.tar.gz", hash = "sha256:b41131ec35e631e7383ab26d6146b8d8972abc8b6309b963b306fbcca87f5ed5", size = 642156, upload-time = "2026-02-06T09:20:03.013Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d5/ae/2f6d96b4e6c5478d87d606a1934b5d436c4a2bce6bb7c6fdece891c128e3/huggingface_hub-1.4.1-py3-none-any.whl", hash = "sha256:9931d075fb7a79af5abc487106414ec5fba2c0ae86104c0c62fd6cae38873d18", size = 553326, upload-time = "2026-02-06T09:20:00.728Z" },
]
[[package]]
name = "idna"
version = "3.11"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" },
]
[[package]]
name = "loguru"
version = "0.7.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "win32-setctime", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" },
]
[[package]]
name = "markdown-it-py"
version = "4.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mdurl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
]
[[package]]
name = "mdurl"
version = "0.1.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
]
[[package]]
name = "mmh3"
version = "5.2.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a7/af/f28c2c2f51f31abb4725f9a64bc7863d5f491f6539bd26aee2a1d21a649e/mmh3-5.2.0.tar.gz", hash = "sha256:1efc8fec8478e9243a78bb993422cf79f8ff85cb4cf6b79647480a31e0d950a8", size = 33582, upload-time = "2025-07-29T07:43:48.49Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f7/ca/a20db059a8a47048aaf550da14a145b56e9c7386fb8280d3ce2962dcebf7/mmh3-5.2.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:e5015f0bb6eb50008bed2d4b1ce0f2a294698a926111e4bb202c0987b4f89078", size = 39209, upload-time = "2025-07-29T07:42:51.559Z" },
{ url = "https://files.pythonhosted.org/packages/98/dd/e5094799d55c7482d814b979a0fd608027d0af1b274bfb4c3ea3e950bfd5/mmh3-5.2.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:e0f3ed828d709f5b82d8bfe14f8856120718ec4bd44a5b26102c3030a1e12501", size = 39843, upload-time = "2025-07-29T07:42:52.536Z" },
{ url = "https://files.pythonhosted.org/packages/f4/6b/7844d7f832c85400e7cc89a1348e4e1fdd38c5a38415bb5726bbb8fcdb6c/mmh3-5.2.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:f35727c5118aba95f0397e18a1a5b8405425581bfe53e821f0fb444cbdc2bc9b", size = 40648, upload-time = "2025-07-29T07:42:53.392Z" },
{ url = "https://files.pythonhosted.org/packages/1f/bf/71f791f48a21ff3190ba5225807cbe4f7223360e96862c376e6e3fb7efa7/mmh3-5.2.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bc244802ccab5220008cb712ca1508cb6a12f0eb64ad62997156410579a1770", size = 56164, upload-time = "2025-07-29T07:42:54.267Z" },
{ url = "https://files.pythonhosted.org/packages/70/1f/f87e3d34d83032b4f3f0f528c6d95a98290fcacf019da61343a49dccfd51/mmh3-5.2.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ff3d50dc3fe8a98059f99b445dfb62792b5d006c5e0b8f03c6de2813b8376110", size = 40692, upload-time = "2025-07-29T07:42:55.234Z" },
{ url = "https://files.pythonhosted.org/packages/a6/e2/db849eaed07117086f3452feca8c839d30d38b830ac59fe1ce65af8be5ad/mmh3-5.2.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:37a358cc881fe796e099c1db6ce07ff757f088827b4e8467ac52b7a7ffdca647", size = 40068, upload-time = "2025-07-29T07:42:56.158Z" },
{ url = "https://files.pythonhosted.org/packages/df/6b/209af927207af77425b044e32f77f49105a0b05d82ff88af6971d8da4e19/mmh3-5.2.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b9a87025121d1c448f24f27ff53a5fe7b6ef980574b4a4f11acaabe702420d63", size = 97367, upload-time = "2025-07-29T07:42:57.037Z" },
{ url = "https://files.pythonhosted.org/packages/ca/e0/78adf4104c425606a9ce33fb351f790c76a6c2314969c4a517d1ffc92196/mmh3-5.2.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ba55d6ca32eeef8b2625e1e4bfc3b3db52bc63014bd7e5df8cc11bf2b036b12", size = 103306, upload-time = "2025-07-29T07:42:58.522Z" },
{ url = "https://files.pythonhosted.org/packages/a3/79/c2b89f91b962658b890104745b1b6c9ce38d50a889f000b469b91eeb1b9e/mmh3-5.2.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9ff37ba9f15637e424c2ab57a1a590c52897c845b768e4e0a4958084ec87f22", size = 106312, upload-time = "2025-07-29T07:42:59.552Z" },
{ url = "https://files.pythonhosted.org/packages/4b/14/659d4095528b1a209be90934778c5ffe312177d51e365ddcbca2cac2ec7c/mmh3-5.2.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a094319ec0db52a04af9fdc391b4d39a1bc72bc8424b47c4411afb05413a44b5", size = 113135, upload-time = "2025-07-29T07:43:00.745Z" },
{ url = "https://files.pythonhosted.org/packages/8d/6f/cd7734a779389a8a467b5c89a48ff476d6f2576e78216a37551a97e9e42a/mmh3-5.2.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c5584061fd3da584659b13587f26c6cad25a096246a481636d64375d0c1f6c07", size = 120775, upload-time = "2025-07-29T07:43:02.124Z" },
{ url = "https://files.pythonhosted.org/packages/1d/ca/8256e3b96944408940de3f9291d7e38a283b5761fe9614d4808fcf27bd62/mmh3-5.2.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ecbfc0437ddfdced5e7822d1ce4855c9c64f46819d0fdc4482c53f56c707b935", size = 99178, upload-time = "2025-07-29T07:43:03.182Z" },
{ url = "https://files.pythonhosted.org/packages/8a/32/39e2b3cf06b6e2eb042c984dab8680841ac2a0d3ca6e0bea30db1f27b565/mmh3-5.2.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:7b986d506a8e8ea345791897ba5d8ba0d9d8820cd4fc3e52dbe6de19388de2e7", size = 98738, upload-time = "2025-07-29T07:43:04.207Z" },
{ url = "https://files.pythonhosted.org/packages/61/d3/7bbc8e0e8cf65ebbe1b893ffa0467b7ecd1bd07c3bbf6c9db4308ada22ec/mmh3-5.2.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:38d899a156549da8ef6a9f1d6f7ef231228d29f8f69bce2ee12f5fba6d6fd7c5", size = 106510, upload-time = "2025-07-29T07:43:05.656Z" },
{ url = "https://files.pythonhosted.org/packages/10/99/b97e53724b52374e2f3859046f0eb2425192da356cb19784d64bc17bb1cf/mmh3-5.2.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:d86651fa45799530885ba4dab3d21144486ed15285e8784181a0ab37a4552384", size = 110053, upload-time = "2025-07-29T07:43:07.204Z" },
{ url = "https://files.pythonhosted.org/packages/ac/62/3688c7d975ed195155671df68788c83fed6f7909b6ec4951724c6860cb97/mmh3-5.2.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c463d7c1c4cfc9d751efeaadd936bbba07b5b0ed81a012b3a9f5a12f0872bd6e", size = 97546, upload-time = "2025-07-29T07:43:08.226Z" },
{ url = "https://files.pythonhosted.org/packages/ca/3b/c6153250f03f71a8b7634cded82939546cdfba02e32f124ff51d52c6f991/mmh3-5.2.0-cp314-cp314-win32.whl", hash = "sha256:bb4fe46bdc6104fbc28db7a6bacb115ee6368ff993366bbd8a2a7f0076e6f0c0", size = 41422, upload-time = "2025-07-29T07:43:09.216Z" },
{ url = "https://files.pythonhosted.org/packages/74/01/a27d98bab083a435c4c07e9d1d720d4c8a578bf4c270bae373760b1022be/mmh3-5.2.0-cp314-cp314-win_amd64.whl", hash = "sha256:7c7f0b342fd06044bedd0b6e72177ddc0076f54fd89ee239447f8b271d919d9b", size = 42135, upload-time = "2025-07-29T07:43:10.183Z" },
{ url = "https://files.pythonhosted.org/packages/cb/c9/dbba5507e95429b8b380e2ba091eff5c20a70a59560934dff0ad8392b8c8/mmh3-5.2.0-cp314-cp314-win_arm64.whl", hash = "sha256:3193752fc05ea72366c2b63ff24b9a190f422e32d75fdeae71087c08fff26115", size = 39879, upload-time = "2025-07-29T07:43:11.106Z" },
{ url = "https://files.pythonhosted.org/packages/b5/d1/c8c0ef839c17258b9de41b84f663574fabcf8ac2007b7416575e0f65ff6e/mmh3-5.2.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:69fc339d7202bea69ef9bd7c39bfdf9fdabc8e6822a01eba62fb43233c1b3932", size = 57696, upload-time = "2025-07-29T07:43:11.989Z" },
{ url = "https://files.pythonhosted.org/packages/2f/55/95e2b9ff201e89f9fe37036037ab61a6c941942b25cdb7b6a9df9b931993/mmh3-5.2.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:12da42c0a55c9d86ab566395324213c319c73ecb0c239fad4726324212b9441c", size = 41421, upload-time = "2025-07-29T07:43:13.269Z" },
{ url = "https://files.pythonhosted.org/packages/77/79/9be23ad0b7001a4b22752e7693be232428ecc0a35068a4ff5c2f14ef8b20/mmh3-5.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f7f9034c7cf05ddfaac8d7a2e63a3c97a840d4615d0a0e65ba8bdf6f8576e3be", size = 40853, upload-time = "2025-07-29T07:43:14.888Z" },
{ url = "https://files.pythonhosted.org/packages/ac/1b/96b32058eda1c1dee8264900c37c359a7325c1f11f5ff14fd2be8e24eff9/mmh3-5.2.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:11730eeb16dfcf9674fdea9bb6b8e6dd9b40813b7eb839bc35113649eef38aeb", size = 109694, upload-time = "2025-07-29T07:43:15.816Z" },
{ url = "https://files.pythonhosted.org/packages/8d/6f/a2ae44cd7dad697b6dea48390cbc977b1e5ca58fda09628cbcb2275af064/mmh3-5.2.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:932a6eec1d2e2c3c9e630d10f7128d80e70e2d47fe6b8c7ea5e1afbd98733e65", size = 117438, upload-time = "2025-07-29T07:43:16.865Z" },
{ url = "https://files.pythonhosted.org/packages/a0/08/bfb75451c83f05224a28afeaf3950c7b793c0b71440d571f8e819cfb149a/mmh3-5.2.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ca975c51c5028947bbcfc24966517aac06a01d6c921e30f7c5383c195f87991", size = 120409, upload-time = "2025-07-29T07:43:18.207Z" },
{ url = "https://files.pythonhosted.org/packages/9f/ea/8b118b69b2ff8df568f742387d1a159bc654a0f78741b31437dd047ea28e/mmh3-5.2.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5b0b58215befe0f0e120b828f7645e97719bbba9f23b69e268ed0ac7adde8645", size = 125909, upload-time = "2025-07-29T07:43:19.39Z" },
{ url = "https://files.pythonhosted.org/packages/3e/11/168cc0b6a30650032e351a3b89b8a47382da541993a03af91e1ba2501234/mmh3-5.2.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29c2b9ce61886809d0492a274a5a53047742dea0f703f9c4d5d223c3ea6377d3", size = 135331, upload-time = "2025-07-29T07:43:20.435Z" },
{ url = "https://files.pythonhosted.org/packages/31/05/e3a9849b1c18a7934c64e831492c99e67daebe84a8c2f2c39a7096a830e3/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:a367d4741ac0103f8198c82f429bccb9359f543ca542b06a51f4f0332e8de279", size = 110085, upload-time = "2025-07-29T07:43:21.92Z" },
{ url = "https://files.pythonhosted.org/packages/d9/d5/a96bcc306e3404601418b2a9a370baec92af84204528ba659fdfe34c242f/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:5a5dba98e514fb26241868f6eb90a7f7ca0e039aed779342965ce24ea32ba513", size = 111195, upload-time = "2025-07-29T07:43:23.066Z" },
{ url = "https://files.pythonhosted.org/packages/af/29/0fd49801fec5bff37198684e0849b58e0dab3a2a68382a357cfffb0fafc3/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:941603bfd75a46023807511c1ac2f1b0f39cccc393c15039969806063b27e6db", size = 116919, upload-time = "2025-07-29T07:43:24.178Z" },
{ url = "https://files.pythonhosted.org/packages/2d/04/4f3c32b0a2ed762edca45d8b46568fc3668e34f00fb1e0a3b5451ec1281c/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:132dd943451a7c7546978863d2f5a64977928410782e1a87d583cb60eb89e667", size = 123160, upload-time = "2025-07-29T07:43:25.26Z" },
{ url = "https://files.pythonhosted.org/packages/91/76/3d29eaa38821730633d6a240d36fa8ad2807e9dfd432c12e1a472ed211eb/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f698733a8a494466432d611a8f0d1e026f5286dee051beea4b3c3146817e35d5", size = 110206, upload-time = "2025-07-29T07:43:26.699Z" },
{ url = "https://files.pythonhosted.org/packages/44/1c/ccf35892684d3a408202e296e56843743e0b4fb1629e59432ea88cdb3909/mmh3-5.2.0-cp314-cp314t-win32.whl", hash = "sha256:6d541038b3fc360ec538fc116de87462627944765a6750308118f8b509a8eec7", size = 41970, upload-time = "2025-07-29T07:43:27.666Z" },
{ url = "https://files.pythonhosted.org/packages/75/b2/b9e4f1e5adb5e21eb104588fcee2cd1eaa8308255173481427d5ecc4284e/mmh3-5.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:e912b19cf2378f2967d0c08e86ff4c6c360129887f678e27e4dde970d21b3f4d", size = 43063, upload-time = "2025-07-29T07:43:28.582Z" },
{ url = "https://files.pythonhosted.org/packages/6a/fc/0e61d9a4e29c8679356795a40e48f647b4aad58d71bfc969f0f8f56fb912/mmh3-5.2.0-cp314-cp314t-win_arm64.whl", hash = "sha256:e7884931fe5e788163e7b3c511614130c2c59feffdc21112290a194487efb2e9", size = 40455, upload-time = "2025-07-29T07:43:29.563Z" },
]
[[package]]
name = "mpmath"
version = "1.3.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" },
]
[[package]]
name = "numpy"
version = "2.4.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/57/fd/0005efbd0af48e55eb3c7208af93f2862d4b1a56cd78e84309a2d959208d/numpy-2.4.2.tar.gz", hash = "sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae", size = 20723651, upload-time = "2026-01-31T23:13:10.135Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/88/b7df6050bf18fdcfb7046286c6535cabbdd2064a3440fca3f069d319c16e/numpy-2.4.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:444be170853f1f9d528428eceb55f12918e4fda5d8805480f36a002f1415e09b", size = 16663092, upload-time = "2026-01-31T23:12:04.521Z" },
{ url = "https://files.pythonhosted.org/packages/25/7a/1fee4329abc705a469a4afe6e69b1ef7e915117747886327104a8493a955/numpy-2.4.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d1240d50adff70c2a88217698ca844723068533f3f5c5fa6ee2e3220e3bdb000", size = 14698770, upload-time = "2026-01-31T23:12:06.96Z" },
{ url = "https://files.pythonhosted.org/packages/fb/0b/f9e49ba6c923678ad5bc38181c08ac5e53b7a5754dbca8e581aa1a56b1ff/numpy-2.4.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:7cdde6de52fb6664b00b056341265441192d1291c130e99183ec0d4b110ff8b1", size = 5208562, upload-time = "2026-01-31T23:12:09.632Z" },
{ url = "https://files.pythonhosted.org/packages/7d/12/d7de8f6f53f9bb76997e5e4c069eda2051e3fe134e9181671c4391677bb2/numpy-2.4.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:cda077c2e5b780200b6b3e09d0b42205a3d1c68f30c6dceb90401c13bff8fe74", size = 6543710, upload-time = "2026-01-31T23:12:11.969Z" },
{ url = "https://files.pythonhosted.org/packages/09/63/c66418c2e0268a31a4cf8a8b512685748200f8e8e8ec6c507ce14e773529/numpy-2.4.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d30291931c915b2ab5717c2974bb95ee891a1cf22ebc16a8006bd59cd210d40a", size = 15677205, upload-time = "2026-01-31T23:12:14.33Z" },
{ url = "https://files.pythonhosted.org/packages/5d/6c/7f237821c9642fb2a04d2f1e88b4295677144ca93285fd76eff3bcba858d/numpy-2.4.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bba37bc29d4d85761deed3954a1bc62be7cf462b9510b51d367b769a8c8df325", size = 16611738, upload-time = "2026-01-31T23:12:16.525Z" },
{ url = "https://files.pythonhosted.org/packages/c2/a7/39c4cdda9f019b609b5c473899d87abff092fc908cfe4d1ecb2fcff453b0/numpy-2.4.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b2f0073ed0868db1dcd86e052d37279eef185b9c8db5bf61f30f46adac63c909", size = 17028888, upload-time = "2026-01-31T23:12:19.306Z" },
{ url = "https://files.pythonhosted.org/packages/da/b3/e84bb64bdfea967cc10950d71090ec2d84b49bc691df0025dddb7c26e8e3/numpy-2.4.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7f54844851cdb630ceb623dcec4db3240d1ac13d4990532446761baede94996a", size = 18339556, upload-time = "2026-01-31T23:12:21.816Z" },
{ url = "https://files.pythonhosted.org/packages/88/f5/954a291bc1192a27081706862ac62bb5920fbecfbaa302f64682aa90beed/numpy-2.4.2-cp314-cp314-win32.whl", hash = "sha256:12e26134a0331d8dbd9351620f037ec470b7c75929cb8a1537f6bfe411152a1a", size = 6006899, upload-time = "2026-01-31T23:12:24.14Z" },
{ url = "https://files.pythonhosted.org/packages/05/cb/eff72a91b2efdd1bc98b3b8759f6a1654aa87612fc86e3d87d6fe4f948c4/numpy-2.4.2-cp314-cp314-win_amd64.whl", hash = "sha256:068cdb2d0d644cdb45670810894f6a0600797a69c05f1ac478e8d31670b8ee75", size = 12443072, upload-time = "2026-01-31T23:12:26.33Z" },
{ url = "https://files.pythonhosted.org/packages/37/75/62726948db36a56428fce4ba80a115716dc4fad6a3a4352487f8bb950966/numpy-2.4.2-cp314-cp314-win_arm64.whl", hash = "sha256:6ed0be1ee58eef41231a5c943d7d1375f093142702d5723ca2eb07db9b934b05", size = 10494886, upload-time = "2026-01-31T23:12:28.488Z" },
{ url = "https://files.pythonhosted.org/packages/36/2f/ee93744f1e0661dc267e4b21940870cabfae187c092e1433b77b09b50ac4/numpy-2.4.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:98f16a80e917003a12c0580f97b5f875853ebc33e2eaa4bccfc8201ac6869308", size = 14818567, upload-time = "2026-01-31T23:12:30.709Z" },
{ url = "https://files.pythonhosted.org/packages/a7/24/6535212add7d76ff938d8bdc654f53f88d35cddedf807a599e180dcb8e66/numpy-2.4.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:20abd069b9cda45874498b245c8015b18ace6de8546bf50dfa8cea1696ed06ef", size = 5328372, upload-time = "2026-01-31T23:12:32.962Z" },
{ url = "https://files.pythonhosted.org/packages/5e/9d/c48f0a035725f925634bf6b8994253b43f2047f6778a54147d7e213bc5a7/numpy-2.4.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e98c97502435b53741540a5717a6749ac2ada901056c7db951d33e11c885cc7d", size = 6649306, upload-time = "2026-01-31T23:12:34.797Z" },
{ url = "https://files.pythonhosted.org/packages/81/05/7c73a9574cd4a53a25907bad38b59ac83919c0ddc8234ec157f344d57d9a/numpy-2.4.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da6cad4e82cb893db4b69105c604d805e0c3ce11501a55b5e9f9083b47d2ffe8", size = 15722394, upload-time = "2026-01-31T23:12:36.565Z" },
{ url = "https://files.pythonhosted.org/packages/35/fa/4de10089f21fc7d18442c4a767ab156b25c2a6eaf187c0db6d9ecdaeb43f/numpy-2.4.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e4424677ce4b47fe73c8b5556d876571f7c6945d264201180db2dc34f676ab5", size = 16653343, upload-time = "2026-01-31T23:12:39.188Z" },
{ url = "https://files.pythonhosted.org/packages/b8/f9/d33e4ffc857f3763a57aa85650f2e82486832d7492280ac21ba9efda80da/numpy-2.4.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2b8f157c8a6f20eb657e240f8985cc135598b2b46985c5bccbde7616dc9c6b1e", size = 17078045, upload-time = "2026-01-31T23:12:42.041Z" },
{ url = "https://files.pythonhosted.org/packages/c8/b8/54bdb43b6225badbea6389fa038c4ef868c44f5890f95dd530a218706da3/numpy-2.4.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5daf6f3914a733336dab21a05cdec343144600e964d2fcdabaac0c0269874b2a", size = 18380024, upload-time = "2026-01-31T23:12:44.331Z" },
{ url = "https://files.pythonhosted.org/packages/a5/55/6e1a61ded7af8df04016d81b5b02daa59f2ea9252ee0397cb9f631efe9e5/numpy-2.4.2-cp314-cp314t-win32.whl", hash = "sha256:8c50dd1fc8826f5b26a5ee4d77ca55d88a895f4e4819c7ecc2a9f5905047a443", size = 6153937, upload-time = "2026-01-31T23:12:47.229Z" },
{ url = "https://files.pythonhosted.org/packages/45/aa/fa6118d1ed6d776b0983f3ceac9b1a5558e80df9365b1c3aa6d42bf9eee4/numpy-2.4.2-cp314-cp314t-win_amd64.whl", hash = "sha256:fcf92bee92742edd401ba41135185866f7026c502617f422eb432cfeca4fe236", size = 12631844, upload-time = "2026-01-31T23:12:48.997Z" },
{ url = "https://files.pythonhosted.org/packages/32/0a/2ec5deea6dcd158f254a7b372fb09cfba5719419c8d66343bab35237b3fb/numpy-2.4.2-cp314-cp314t-win_arm64.whl", hash = "sha256:1f92f53998a17265194018d1cc321b2e96e900ca52d54c7c77837b71b9465181", size = 10565379, upload-time = "2026-01-31T23:12:51.345Z" },
]
[[package]]
name = "onnxruntime"
version = "1.24.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "flatbuffers" },
{ name = "numpy" },
{ name = "packaging" },
{ name = "protobuf" },
{ name = "sympy" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/87/23/167d964414cee2af9c72af323b28d2c4cb35beed855c830a23f198265c79/onnxruntime-1.24.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:890c503ca187bc883c3aa72c53f2a604ec8e8444bdd1bf6ac243ec6d5e085202", size = 17214004, upload-time = "2026-02-05T17:31:11.917Z" },
{ url = "https://files.pythonhosted.org/packages/b4/24/6e5558fdd51027d6830cf411bc003ae12c64054826382e2fab89e99486a0/onnxruntime-1.24.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4da1b84b3bdeec543120df169e5e62a1445bf732fc2c7fb036c2f8a4090455e8", size = 15017034, upload-time = "2026-02-05T17:31:04.331Z" },
{ url = "https://files.pythonhosted.org/packages/91/d4/3cb1c9eaae1103265ed7eb00a3eaeb0d9ba51dc88edc398b7071c9553bed/onnxruntime-1.24.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:557753ec345efa227c6a65139f3d29c76330fcbd54cc10dd1b64232ebb939c13", size = 17097531, upload-time = "2026-02-05T17:31:40.303Z" },
{ url = "https://files.pythonhosted.org/packages/0f/da/4522b199c12db7c5b46aaf265ee0d741abe65ea912f6c0aaa2cc18a4654d/onnxruntime-1.24.1-cp314-cp314-win_amd64.whl", hash = "sha256:ea4942104805e868f3ddddfa1fbb58b04503a534d489ab2d1452bbfa345c78c2", size = 12795556, upload-time = "2026-02-05T17:32:11.886Z" },
{ url = "https://files.pythonhosted.org/packages/a1/53/3b8969417276b061ff04502ccdca9db4652d397abbeb06c9f6ae05cec9ca/onnxruntime-1.24.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ea8963a99e0f10489acdf00ef3383c3232b7e44aa497b063c63be140530d9f85", size = 15025434, upload-time = "2026-02-05T17:31:06.942Z" },
{ url = "https://files.pythonhosted.org/packages/ab/a2/cfcf009eb38d90cc628c087b6506b3dfe1263387f3cbbf8d272af4fef957/onnxruntime-1.24.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34488aa760fb5c2e6d06a7ca9241124eb914a6a06f70936a14c669d1b3df9598", size = 17099815, upload-time = "2026-02-05T17:31:43.092Z" },
]
[[package]]
name = "packaging"
version = "26.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" },
]
[[package]]
name = "pillow"
version = "11.3.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" },
{ url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" },
{ url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" },
{ url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" },
{ url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" },
{ url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" },
{ url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" },
{ url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" },
{ url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" },
{ url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" },
{ url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" },
{ url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" },
{ url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" },
{ url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" },
{ url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" },
{ url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" },
{ url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" },
{ url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" },
{ url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" },
{ url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" },
{ url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" },
{ url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
]
[[package]]
name = "protobuf"
version = "6.33.5"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ba/25/7c72c307aafc96fa87062aa6291d9f7c94836e43214d43722e86037aac02/protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c", size = 444465, upload-time = "2026-01-29T21:51:33.494Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b1/79/af92d0a8369732b027e6d6084251dd8e782c685c72da161bd4a2e00fbabb/protobuf-6.33.5-cp310-abi3-win32.whl", hash = "sha256:d71b040839446bac0f4d162e758bea99c8251161dae9d0983a3b88dee345153b", size = 425769, upload-time = "2026-01-29T21:51:21.751Z" },
{ url = "https://files.pythonhosted.org/packages/55/75/bb9bc917d10e9ee13dee8607eb9ab963b7cf8be607c46e7862c748aa2af7/protobuf-6.33.5-cp310-abi3-win_amd64.whl", hash = "sha256:3093804752167bcab3998bec9f1048baae6e29505adaf1afd14a37bddede533c", size = 437118, upload-time = "2026-01-29T21:51:24.022Z" },
{ url = "https://files.pythonhosted.org/packages/a2/6b/e48dfc1191bc5b52950246275bf4089773e91cb5ba3592621723cdddca62/protobuf-6.33.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:a5cb85982d95d906df1e2210e58f8e4f1e3cdc088e52c921a041f9c9a0386de5", size = 427766, upload-time = "2026-01-29T21:51:25.413Z" },
{ url = "https://files.pythonhosted.org/packages/4e/b1/c79468184310de09d75095ed1314b839eb2f72df71097db9d1404a1b2717/protobuf-6.33.5-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:9b71e0281f36f179d00cbcb119cb19dec4d14a81393e5ea220f64b286173e190", size = 324638, upload-time = "2026-01-29T21:51:26.423Z" },
{ url = "https://files.pythonhosted.org/packages/c5/f5/65d838092fd01c44d16037953fd4c2cc851e783de9b8f02b27ec4ffd906f/protobuf-6.33.5-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8afa18e1d6d20af15b417e728e9f60f3aa108ee76f23c3b2c07a2c3b546d3afd", size = 339411, upload-time = "2026-01-29T21:51:27.446Z" },
{ url = "https://files.pythonhosted.org/packages/9b/53/a9443aa3ca9ba8724fdfa02dd1887c1bcd8e89556b715cfbacca6b63dbec/protobuf-6.33.5-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:cbf16ba3350fb7b889fca858fb215967792dc125b35c7976ca4818bee3521cf0", size = 323465, upload-time = "2026-01-29T21:51:28.925Z" },
{ url = "https://files.pythonhosted.org/packages/57/bf/2086963c69bdac3d7cff1cc7ff79b8ce5ea0bec6797a017e1be338a46248/protobuf-6.33.5-py3-none-any.whl", hash = "sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02", size = 170687, upload-time = "2026-01-29T21:51:32.557Z" },
]
[[package]]
name = "py-rust-stemmers"
version = "0.1.5"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/8e/63/4fbc14810c32d2a884e2e94e406a7d5bf8eee53e1103f558433817230342/py_rust_stemmers-0.1.5.tar.gz", hash = "sha256:e9c310cfb5c2470d7c7c8a0484725965e7cab8b1237e106a0863d5741da3e1f7", size = 9388, upload-time = "2025-02-19T13:56:28.708Z" }
[[package]]
name = "pygments"
version = "2.19.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
]
[[package]]
name = "python-dotenv"
version = "1.2.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" },
]
[[package]]
name = "pyyaml"
version = "6.0.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" },
{ url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" },
{ url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" },
{ url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" },
{ url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" },
{ url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" },
{ url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" },
{ url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" },
{ url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" },
{ url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" },
{ url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" },
{ url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" },
{ url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" },
{ url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" },
{ url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" },
{ url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" },
{ url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" },
{ url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" },
]
[[package]]
name = "requests"
version = "2.32.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "charset-normalizer" },
{ name = "idna" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
]
[[package]]
name = "rich"
version = "14.3.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "markdown-it-py" },
{ name = "pygments" },
]
sdist = { url = "https://files.pythonhosted.org/packages/74/99/a4cab2acbb884f80e558b0771e97e21e939c5dfb460f488d19df485e8298/rich-14.3.2.tar.gz", hash = "sha256:e712f11c1a562a11843306f5ed999475f09ac31ffb64281f73ab29ffdda8b3b8", size = 230143, upload-time = "2026-02-01T16:20:47.908Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ef/45/615f5babd880b4bd7d405cc0dc348234c5ffb6ed1ea33e152ede08b2072d/rich-14.3.2-py3-none-any.whl", hash = "sha256:08e67c3e90884651da3239ea668222d19bea7b589149d8014a21c633420dbb69", size = 309963, upload-time = "2026-02-01T16:20:46.078Z" },
]
[[package]]
name = "shellingham"
version = "1.5.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
]
[[package]]
name = "slack-bolt"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "slack-sdk" },
]
sdist = { url = "https://files.pythonhosted.org/packages/4c/28/50ed0b86e48b48e6ddcc71de93b91c8ac14a55d1249e4bff0586494a2f90/slack_bolt-1.27.0.tar.gz", hash = "sha256:3db91d64e277e176a565c574ae82748aa8554f19e41a4fceadca4d65374ce1e0", size = 129101, upload-time = "2025-11-13T20:17:46.878Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/01/a8/1acb355759747ba4da5f45c1a33d641994b9e04b914908c9434f18bd97e8/slack_bolt-1.27.0-py2.py3-none-any.whl", hash = "sha256:c43c94bf34740f2adeb9b55566c83f1e73fed6ba2878bd346cdfd6fd8ad22360", size = 230428, upload-time = "2025-11-13T20:17:45.465Z" },
]
[[package]]
name = "slack-sdk"
version = "3.40.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/45/f7/4a968e1d091569ff7f6da929695728c3eacf10afcda7c424b70df2c3700b/slack_sdk-3.40.0.tar.gz", hash = "sha256:87b9a79d1d6e19a2b1877727a0ec6f016d82d30a6a410389fba87c221c99f10e", size = 249478, upload-time = "2026-02-10T22:12:13.445Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/27/72/428fb01a1043ddbb3f66297363406d6e69ddff5ad89c4d07945a3753a235/slack_sdk-3.40.0-py2.py3-none-any.whl", hash = "sha256:f2bada5ed3adb10a01e154e90db01d6d8938d0461b5790c12bcb807b2d28bbe2", size = 312786, upload-time = "2026-02-10T22:12:11.258Z" },
]
[[package]]
name = "sympy"
version = "1.14.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mpmath" },
]
sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" },
]
[[package]]
name = "tokenizers"
version = "0.22.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "huggingface-hub" },
]
sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" },
{ url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" },
{ url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" },
{ url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" },
{ url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" },
{ url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" },
{ url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" },
{ url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" },
{ url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" },
{ url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" },
{ url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" },
{ url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" },
{ url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" },
{ url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" },
{ url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" },
]
[[package]]
name = "tqdm"
version = "4.67.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" },
]
[[package]]
name = "typer"
version = "0.23.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-doc" },
{ name = "click" },
{ name = "rich" },
{ name = "shellingham" },
]
sdist = { url = "https://files.pythonhosted.org/packages/fd/07/b822e1b307d40e263e8253d2384cf98c51aa2368cc7ba9a07e523a1d964b/typer-0.23.1.tar.gz", hash = "sha256:2070374e4d31c83e7b61362fd859aa683576432fd5b026b060ad6b4cd3b86134", size = 120047, upload-time = "2026-02-13T10:04:30.984Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d5/91/9b286ab899c008c2cb05e8be99814807e7fbbd33f0c0c960470826e5ac82/typer-0.23.1-py3-none-any.whl", hash = "sha256:3291ad0d3c701cbf522012faccfbb29352ff16ad262db2139e6b01f15781f14e", size = 56813, upload-time = "2026-02-13T10:04:32.008Z" },
]
[[package]]
name = "typer-slim"
version = "0.23.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typer" },
]
sdist = { url = "https://files.pythonhosted.org/packages/da/22/b9c47b8655937b6877d40791b937931702ba9c5f9d28753199266aa96f50/typer_slim-0.23.1.tar.gz", hash = "sha256:dfe92a6317030ee2380f65bf92e540d7c77fefcc689e10d585b4925b45b5e06a", size = 4762, upload-time = "2026-02-13T10:04:26.416Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ad/8a/5764b851659345f34787f1b6eb30b9d308bbd6c294825cbe38b6b869c97a/typer_slim-0.23.1-py3-none-any.whl", hash = "sha256:8146d5df1eb89f628191c4c604c8464fa841885d0733c58e6e700ff0228adac5", size = 3397, upload-time = "2026-02-13T10:04:27.132Z" },
]
[[package]]
name = "typing-extensions"
version = "4.15.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]
[[package]]
name = "urllib3"
version = "2.6.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
]
[[package]]
name = "watchdog"
version = "6.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" },
{ url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" },
{ url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" },
{ url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" },
{ url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" },
{ url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" },
{ url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" },
{ url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" },
{ url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" },
{ url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
]
[[package]]
name = "win32-setctime"
version = "1.2.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" },
]