latest updates
This commit is contained in:
@@ -1,2 +1,6 @@
|
||||
# Aetheel Adapters
|
||||
# Channel adapters for connecting the AI agent to messaging platforms.
|
||||
|
||||
from adapters.base import BaseAdapter, IncomingMessage
|
||||
|
||||
__all__ = ["BaseAdapter", "IncomingMessage"]
|
||||
|
||||
146
adapters/base.py
Normal file
146
adapters/base.py
Normal file
@@ -0,0 +1,146 @@
|
||||
"""
|
||||
Aetheel Base Adapter
|
||||
====================
|
||||
Abstract base class for all channel adapters (Slack, Telegram, Discord, etc.).
|
||||
|
||||
Every adapter converts platform-specific events into a channel-agnostic
|
||||
IncomingMessage and routes responses back through send_message().
|
||||
|
||||
The AI handler only sees IncomingMessage — it never knows which platform
|
||||
the message came from.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Callable
|
||||
|
||||
logger = logging.getLogger("aetheel.adapters")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Channel-Agnostic Message
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class IncomingMessage:
    """
    Channel-agnostic incoming message.

    Every adapter converts its platform-specific event into this format
    before passing it to the message handler. This is the ONLY type the
    AI handler sees.
    """

    text: str  # message text (adapters strip the bot @mention before building this)
    user_id: str  # platform-specific sender ID
    user_name: str  # human-readable sender name
    channel_id: str  # platform-specific channel/chat ID
    channel_name: str  # human-readable channel/chat name
    conversation_id: str  # unique ID for session isolation (thread, chat, etc.)
    source: str  # "slack", "telegram", "discord", etc.
    is_dm: bool  # True for direct/private messages
    # Defaults to "now" in UTC when the adapter cannot supply an event time.
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    # Raw platform payload; excluded from repr to keep logs readable.
    raw_event: dict[str, Any] = field(default_factory=dict, repr=False)
|
||||
|
||||
|
||||
# Type alias for the message handler callback: receives an IncomingMessage
# and returns a response string to send back, or None for no reply.
MessageHandler = Callable[[IncomingMessage], str | None]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Abstract Base Adapter
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class BaseAdapter(ABC):
    """
    Abstract base class for channel adapters.

    Responsibilities of every concrete adapter:
      1. Connect to its messaging platform.
      2. Translate incoming platform events into IncomingMessage objects.
      3. Invoke the registered handlers with each IncomingMessage.
      4. Deliver responses back to the platform via send_message().
    """

    def __init__(self) -> None:
        # Handlers registered via on_message(), invoked in registration order.
        self._message_handlers: list[MessageHandler] = []

    def on_message(self, handler: MessageHandler) -> MessageHandler:
        """
        Register a message handler; usable as a decorator.

        The handler is called with an IncomingMessage and may return a
        response string (sent back to the channel) or None (no reply).
        """
        self._message_handlers.append(handler)
        return handler

    @abstractmethod
    def start(self) -> None:
        """Start the adapter (blocking)."""

    @abstractmethod
    def start_async(self) -> None:
        """Start the adapter in a background thread (non-blocking)."""

    @abstractmethod
    def stop(self) -> None:
        """Stop the adapter gracefully."""

    @abstractmethod
    def send_message(
        self,
        channel_id: str,
        text: str,
        thread_id: str | None = None,
    ) -> None:
        """
        Send a message to a channel/chat on this platform.

        Args:
            channel_id: Platform-specific channel/chat ID
            text: Message text
            thread_id: Optional thread/reply ID for threading
        """

    @property
    @abstractmethod
    def source_name(self) -> str:
        """Short name for this adapter, e.g. 'slack', 'telegram'."""

    def _dispatch(self, msg: IncomingMessage) -> None:
        """
        Run every registered handler against *msg*.

        Called by subclasses after converting a platform event. A truthy
        handler return value is sent back to the originating channel
        (threaded via raw_event["thread_id"] when present). A handler or
        send failure is logged and reported to the user; a failure while
        sending that error notice is deliberately swallowed (best effort).
        """
        for callback in self._message_handlers:
            try:
                reply = callback(msg)
                if reply:
                    self.send_message(
                        channel_id=msg.channel_id,
                        text=reply,
                        thread_id=msg.raw_event.get("thread_id"),
                    )
            except Exception as exc:
                logger.error(
                    f"[{self.source_name}] Handler error: {exc}", exc_info=True
                )
                try:
                    self.send_message(
                        channel_id=msg.channel_id,
                        text="⚠️ Something went wrong processing your message.",
                        thread_id=msg.raw_event.get("thread_id"),
                    )
                except Exception:
                    pass
|
||||
@@ -9,12 +9,7 @@ Features:
|
||||
- Receives DMs and @mentions in channels
|
||||
- Each thread = persistent conversation context
|
||||
- Sends replies back to the same thread
|
||||
- Message handler callback for plugging in AI logic
|
||||
|
||||
Architecture (from OpenClaw):
|
||||
- OpenClaw uses @slack/bolt (Node.js) with socketMode: true
|
||||
- We replicate this with slack_bolt (Python) which is the official Python SDK
|
||||
- Like OpenClaw, we separate: token resolution, message handling, and sending
|
||||
- Extends BaseAdapter for multi-channel support
|
||||
|
||||
Usage:
|
||||
from adapters.slack_adapter import SlackAdapter
|
||||
@@ -27,8 +22,6 @@ Usage:
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Callable
|
||||
@@ -38,69 +31,32 @@ from slack_bolt.adapter.socket_mode import SocketModeHandler
|
||||
from slack_sdk import WebClient
|
||||
from slack_sdk.errors import SlackApiError
|
||||
|
||||
from adapters.base import BaseAdapter, IncomingMessage
|
||||
|
||||
logger = logging.getLogger("aetheel.slack")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Types
|
||||
# Types (Slack-specific, kept for internal use)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class SlackMessage:
    """Represents an incoming Slack message (mirrors OpenClaw's SlackMessageEvent)."""

    text: str  # message text
    user_id: str  # Slack user ID (U.../W...)
    user_name: str  # resolved display name
    channel_id: str  # Slack channel ID
    channel_name: str  # resolved channel name
    thread_ts: str | None  # parent thread timestamp; None outside a thread
    message_ts: str  # this message's own Slack "ts"
    is_dm: bool  # True for im/mpim channel types
    is_mention: bool  # True when the bot was @mentioned
    is_thread_reply: bool  # True when posted inside an existing thread
    raw_event: dict = field(default_factory=dict, repr=False)  # original Slack event

    @property
    def conversation_id(self) -> str:
        """
        Unique conversation identifier.
        Uses thread_ts if in a thread, otherwise the message_ts.
        This mirrors OpenClaw's session isolation per thread.
        """
        return self.thread_ts or self.message_ts

    @property
    def timestamp(self) -> datetime:
        """Parse Slack ts into a datetime."""
        # Slack "ts" is "seconds.microseconds" since the epoch, as a string.
        ts_float = float(self.message_ts)
        return datetime.fromtimestamp(ts_float, tz=timezone.utc)
|
||||
|
||||
|
||||
@dataclass
class SlackSendResult:
    """Result of sending a Slack message."""

    message_id: str  # Slack "ts" of the posted message
    channel_id: str  # channel the message was posted to
    thread_ts: str | None = None  # thread the message was posted into, if any
|
||||
|
||||
|
||||
# Type alias for the message handler callback
|
||||
MessageHandler = Callable[[SlackMessage], str | None]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Token Resolution (inspired by OpenClaw src/slack/token.ts)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def resolve_bot_token(explicit: str | None = None) -> str:
|
||||
"""
|
||||
Resolve the Slack bot token.
|
||||
Priority: explicit param > SLACK_BOT_TOKEN env var.
|
||||
"""
|
||||
"""Resolve the Slack bot token."""
|
||||
token = (explicit or os.environ.get("SLACK_BOT_TOKEN", "")).strip()
|
||||
if not token:
|
||||
raise ValueError(
|
||||
@@ -113,10 +69,7 @@ def resolve_bot_token(explicit: str | None = None) -> str:
|
||||
|
||||
|
||||
def resolve_app_token(explicit: str | None = None) -> str:
|
||||
"""
|
||||
Resolve the Slack app-level token (required for Socket Mode).
|
||||
Priority: explicit param > SLACK_APP_TOKEN env var.
|
||||
"""
|
||||
"""Resolve the Slack app-level token (required for Socket Mode)."""
|
||||
token = (explicit or os.environ.get("SLACK_APP_TOKEN", "")).strip()
|
||||
if not token:
|
||||
raise ValueError(
|
||||
@@ -133,24 +86,12 @@ def resolve_app_token(explicit: str | None = None) -> str:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class SlackAdapter:
|
||||
class SlackAdapter(BaseAdapter):
|
||||
"""
|
||||
Slack adapter using Socket Mode.
|
||||
Slack adapter using Socket Mode, extending BaseAdapter.
|
||||
|
||||
Inspired by OpenClaw's monitorSlackProvider() in src/slack/monitor/provider.ts:
|
||||
- Connects via Socket Mode (no public URL needed)
|
||||
- Handles DMs and @mentions
|
||||
- Thread-based conversation isolation
|
||||
- Configurable message handler callback
|
||||
|
||||
Example:
|
||||
adapter = SlackAdapter()
|
||||
|
||||
@adapter.on_message
|
||||
def handle(msg: SlackMessage) -> str:
|
||||
return f"Echo: {msg.text}"
|
||||
|
||||
adapter.start()
|
||||
Inspired by OpenClaw's monitorSlackProvider() in src/slack/monitor/provider.ts.
|
||||
Converts Slack events into IncomingMessage objects before dispatching.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
@@ -159,9 +100,9 @@ class SlackAdapter:
|
||||
app_token: str | None = None,
|
||||
log_level: str = "INFO",
|
||||
):
|
||||
super().__init__()
|
||||
self._bot_token = resolve_bot_token(bot_token)
|
||||
self._app_token = resolve_app_token(app_token)
|
||||
self._message_handlers: list[MessageHandler] = []
|
||||
self._bot_user_id: str = ""
|
||||
self._bot_user_name: str = ""
|
||||
self._team_id: str = ""
|
||||
@@ -170,15 +111,7 @@ class SlackAdapter:
|
||||
self._running = False
|
||||
self._socket_handler: SocketModeHandler | None = None
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=getattr(logging, log_level.upper(), logging.INFO),
|
||||
format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
)
|
||||
|
||||
# Initialize Slack Bolt app with Socket Mode
|
||||
# This mirrors OpenClaw's: new App({ token: botToken, appToken, socketMode: true })
|
||||
self._app = App(
|
||||
token=self._bot_token,
|
||||
logger=logger,
|
||||
@@ -189,23 +122,15 @@ class SlackAdapter:
|
||||
self._register_handlers()
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Public API
|
||||
# BaseAdapter implementation
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
def on_message(self, handler: MessageHandler) -> MessageHandler:
|
||||
"""
|
||||
Register a message handler (can be used as a decorator).
|
||||
The handler receives a SlackMessage and should return a response string or None.
|
||||
"""
|
||||
self._message_handlers.append(handler)
|
||||
return handler
|
||||
@property
|
||||
def source_name(self) -> str:
|
||||
return "slack"
|
||||
|
||||
def start(self) -> None:
|
||||
"""
|
||||
Start the Slack adapter in Socket Mode.
|
||||
This is a blocking call (like OpenClaw's `await app.start()`).
|
||||
"""
|
||||
# Resolve bot identity (like OpenClaw's auth.test call)
|
||||
"""Start the Slack adapter in Socket Mode (blocking)."""
|
||||
self._resolve_identity()
|
||||
|
||||
logger.info("=" * 60)
|
||||
@@ -247,57 +172,40 @@ class SlackAdapter:
|
||||
|
||||
def send_message(
|
||||
self,
|
||||
channel: str,
|
||||
channel_id: str,
|
||||
text: str,
|
||||
thread_ts: str | None = None,
|
||||
) -> SlackSendResult:
|
||||
thread_id: str | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Send a message to a Slack channel or DM.
|
||||
Mirrors OpenClaw's sendMessageSlack() in src/slack/send.ts.
|
||||
|
||||
Args:
|
||||
channel: Channel ID (C...), user ID (U...), or DM channel (D...)
|
||||
text: Message text (supports Slack mrkdwn formatting)
|
||||
thread_ts: Optional thread timestamp to reply in a thread
|
||||
|
||||
Returns:
|
||||
SlackSendResult with message_id and channel_id
|
||||
"""
|
||||
if not text.strip():
|
||||
raise ValueError("Cannot send an empty message.")
|
||||
return
|
||||
|
||||
# If it looks like a user ID, open a DM first
|
||||
# (mirrors OpenClaw's resolveChannelId)
|
||||
if channel.startswith("U") or channel.startswith("W"):
|
||||
if channel_id.startswith("U") or channel_id.startswith("W"):
|
||||
try:
|
||||
dm_response = self._client.conversations_open(users=[channel])
|
||||
channel = dm_response["channel"]["id"]
|
||||
dm_response = self._client.conversations_open(users=[channel_id])
|
||||
channel_id = dm_response["channel"]["id"]
|
||||
except SlackApiError as e:
|
||||
raise RuntimeError(f"Failed to open DM with {channel}: {e}") from e
|
||||
raise RuntimeError(f"Failed to open DM with {channel_id}: {e}") from e
|
||||
|
||||
# Chunk long messages (OpenClaw's SLACK_TEXT_LIMIT = 4000)
|
||||
# Chunk long messages (Slack's limit = 4000 chars)
|
||||
SLACK_TEXT_LIMIT = 4000
|
||||
chunks = self._chunk_text(text, SLACK_TEXT_LIMIT)
|
||||
|
||||
last_ts = ""
|
||||
for chunk in chunks:
|
||||
try:
|
||||
response = self._client.chat_postMessage(
|
||||
channel=channel,
|
||||
self._client.chat_postMessage(
|
||||
channel=channel_id,
|
||||
text=chunk,
|
||||
thread_ts=thread_ts,
|
||||
thread_ts=thread_id,
|
||||
)
|
||||
last_ts = response.get("ts", "")
|
||||
except SlackApiError as e:
|
||||
logger.error(f"Failed to send message: {e}")
|
||||
raise
|
||||
|
||||
return SlackSendResult(
|
||||
message_id=last_ts or "unknown",
|
||||
channel_id=channel,
|
||||
thread_ts=thread_ts,
|
||||
)
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Internal: Event handlers
|
||||
# -------------------------------------------------------------------
|
||||
@@ -305,21 +213,13 @@ class SlackAdapter:
|
||||
def _register_handlers(self) -> None:
|
||||
"""Register Slack event handlers on the Bolt app."""
|
||||
|
||||
# Handle direct messages and channel messages WITHOUT @mention
|
||||
# Note: When someone @mentions the bot, Slack fires BOTH a "message"
|
||||
# event and an "app_mention" event. We only want to respond once,
|
||||
# so the message handler skips messages containing the bot @mention.
|
||||
# Those are handled exclusively by handle_mention_event below.
|
||||
@self._app.event("message")
|
||||
def handle_message_event(event: dict, say: Callable, client: WebClient) -> None:
|
||||
# Skip if this message contains an @mention of our bot
|
||||
# (it will be handled by app_mention instead)
|
||||
raw_text = event.get("text", "")
|
||||
if self._bot_user_id and f"<@{self._bot_user_id}>" in raw_text:
|
||||
return
|
||||
self._process_incoming(event, say, client)
|
||||
|
||||
# Handle @mentions in channels
|
||||
@self._app.event("app_mention")
|
||||
def handle_mention_event(event: dict, say: Callable, client: WebClient) -> None:
|
||||
self._process_incoming(event, say, client, is_mention=True)
|
||||
@@ -333,9 +233,9 @@ class SlackAdapter:
|
||||
) -> None:
|
||||
"""
|
||||
Process an incoming Slack message.
|
||||
This is the core handler, inspired by OpenClaw's createSlackMessageHandler().
|
||||
Converts to IncomingMessage and dispatches to handlers.
|
||||
"""
|
||||
# Skip bot messages (including our own)
|
||||
# Skip bot messages
|
||||
if event.get("bot_id") or event.get("subtype") in (
|
||||
"bot_message",
|
||||
"message_changed",
|
||||
@@ -356,7 +256,7 @@ class SlackAdapter:
|
||||
message_ts = event.get("ts", "")
|
||||
is_dm = channel_type in ("im", "mpim")
|
||||
|
||||
# Strip the bot mention from the text (like OpenClaw does)
|
||||
# Strip the bot mention from the text
|
||||
clean_text = self._strip_mention(raw_text).strip()
|
||||
if not clean_text:
|
||||
return
|
||||
@@ -365,56 +265,45 @@ class SlackAdapter:
|
||||
user_name = self._resolve_user_name(user_id, client)
|
||||
channel_name = self._resolve_channel_name(channel_id, client)
|
||||
|
||||
# Build the SlackMessage object
|
||||
msg = SlackMessage(
|
||||
# Conversation ID for session isolation (thread-based)
|
||||
conversation_id = thread_ts or message_ts
|
||||
|
||||
# Build channel-agnostic IncomingMessage
|
||||
msg = IncomingMessage(
|
||||
text=clean_text,
|
||||
user_id=user_id,
|
||||
user_name=user_name,
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_name,
|
||||
thread_ts=thread_ts,
|
||||
message_ts=message_ts,
|
||||
conversation_id=conversation_id,
|
||||
source="slack",
|
||||
is_dm=is_dm,
|
||||
is_mention=is_mention,
|
||||
is_thread_reply=thread_ts is not None,
|
||||
raw_event=event,
|
||||
timestamp=datetime.fromtimestamp(float(message_ts), tz=timezone.utc)
|
||||
if message_ts
|
||||
else datetime.now(timezone.utc),
|
||||
raw_event={
|
||||
"thread_id": thread_ts or message_ts, # for BaseAdapter._dispatch
|
||||
"thread_ts": thread_ts,
|
||||
"message_ts": message_ts,
|
||||
"is_mention": is_mention,
|
||||
"is_thread_reply": thread_ts is not None,
|
||||
"channel_type": channel_type,
|
||||
},
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"📨 Message from @{user_name} in #{channel_name}: {clean_text[:100]}"
|
||||
)
|
||||
|
||||
# Call all registered handlers
|
||||
for handler in self._message_handlers:
|
||||
try:
|
||||
response = handler(msg)
|
||||
if response:
|
||||
# Reply in the same thread
|
||||
# (OpenClaw uses thread_ts for thread isolation)
|
||||
reply_thread = thread_ts or message_ts
|
||||
say(text=response, thread_ts=reply_thread)
|
||||
logger.info(
|
||||
f"📤 Reply sent to #{channel_name} (thread={reply_thread[:10]}...)"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Handler error: {e}", exc_info=True)
|
||||
try:
|
||||
say(
|
||||
text=f"⚠️ Something went wrong processing your message.",
|
||||
thread_ts=thread_ts or message_ts,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
# Dispatch to handlers via BaseAdapter
|
||||
self._dispatch(msg)
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Internal: Helpers
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
def _resolve_identity(self) -> None:
|
||||
"""
|
||||
Resolve bot identity via auth.test.
|
||||
Mirrors OpenClaw's auth.test call in monitorSlackProvider().
|
||||
"""
|
||||
"""Resolve bot identity via auth.test."""
|
||||
try:
|
||||
auth = self._client.auth_test()
|
||||
self._bot_user_id = auth.get("user_id", "")
|
||||
@@ -466,7 +355,6 @@ class SlackAdapter:
|
||||
def _chunk_text(text: str, limit: int = 4000) -> list[str]:
|
||||
"""
|
||||
Split text into chunks respecting Slack's character limit.
|
||||
Mirrors OpenClaw's chunkMarkdownTextWithMode().
|
||||
Tries to split at newlines, then at spaces, then hard-splits.
|
||||
"""
|
||||
if len(text) <= limit:
|
||||
@@ -479,14 +367,11 @@ class SlackAdapter:
|
||||
chunks.append(remaining)
|
||||
break
|
||||
|
||||
# Try to find a good break point
|
||||
cut = limit
|
||||
# Prefer breaking at a newline
|
||||
newline_pos = remaining.rfind("\n", 0, limit)
|
||||
if newline_pos > limit // 2:
|
||||
cut = newline_pos + 1
|
||||
else:
|
||||
# Fall back to space
|
||||
space_pos = remaining.rfind(" ", 0, limit)
|
||||
if space_pos > limit // 2:
|
||||
cut = space_pos + 1
|
||||
|
||||
264
adapters/telegram_adapter.py
Normal file
264
adapters/telegram_adapter.py
Normal file
@@ -0,0 +1,264 @@
|
||||
"""
|
||||
Aetheel Telegram Adapter
|
||||
=========================
|
||||
Connects to Telegram via the Bot API using python-telegram-bot.
|
||||
|
||||
Features:
|
||||
- Receives private messages and group @mentions
|
||||
- Each chat = persistent conversation context
|
||||
- Sends replies back to the same chat
|
||||
- Extends BaseAdapter for multi-channel support
|
||||
|
||||
Setup:
|
||||
1. Create a bot via @BotFather on Telegram
|
||||
2. Set TELEGRAM_BOT_TOKEN in .env
|
||||
3. Start with: python main.py --telegram
|
||||
|
||||
Usage:
|
||||
from adapters.telegram_adapter import TelegramAdapter
|
||||
|
||||
adapter = TelegramAdapter()
|
||||
adapter.on_message(my_handler)
|
||||
adapter.start()
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from adapters.base import BaseAdapter, IncomingMessage
|
||||
|
||||
logger = logging.getLogger("aetheel.telegram")
|
||||
|
||||
|
||||
def resolve_telegram_token(explicit: str | None = None) -> str:
    """
    Resolve the Telegram bot token.

    Priority: the explicit argument, then the TELEGRAM_BOT_TOKEN environment
    variable. The token is whitespace-stripped.

    Raises:
        ValueError: if neither source yields a non-empty token.
    """
    candidate = explicit if explicit else os.environ.get("TELEGRAM_BOT_TOKEN", "")
    token = candidate.strip()
    if token:
        return token
    raise ValueError(
        "Telegram bot token is required. "
        "Set TELEGRAM_BOT_TOKEN environment variable or pass it explicitly. "
        "Get one from @BotFather on Telegram."
    )
||||
|
||||
|
||||
class TelegramAdapter(BaseAdapter):
    """
    Telegram channel adapter using python-telegram-bot.

    Handles:
    - Private messages (DMs)
    - Group messages where the bot is @mentioned
    - Inline replies in groups
    """

    def __init__(
        self,
        bot_token: str | None = None,
    ):
        """
        Args:
            bot_token: Telegram bot token; falls back to the
                TELEGRAM_BOT_TOKEN environment variable (raises ValueError
                if neither is set).
        """
        super().__init__()
        self._token = resolve_telegram_token(bot_token)
        self._application = None  # telegram.ext.Application, built lazily in start()
        self._bot_username: str = ""  # resolved via get_me() in start()
        self._running = False
        self._thread: threading.Thread | None = None  # set by start_async()

    # -------------------------------------------------------------------
    # BaseAdapter implementation
    # -------------------------------------------------------------------

    @property
    def source_name(self) -> str:
        return "telegram"

    def start(self) -> None:
        """Start the Telegram adapter (blocking)."""
        # Imported here so the telegram dependency is only required when
        # this adapter is actually started.
        from telegram.ext import (
            Application,
            MessageHandler,
            filters,
        )

        self._application = (
            Application.builder().token(self._token).build()
        )

        # Register handler for all text messages
        self._application.add_handler(
            MessageHandler(
                filters.TEXT & ~filters.COMMAND, self._handle_message
            )
        )

        # Resolve bot identity
        async def _resolve():
            bot = self._application.bot
            me = await bot.get_me()
            self._bot_username = me.username or ""
            logger.info(f"Telegram bot: @{self._bot_username} (id={me.id})")

        # NOTE(review): asyncio.get_event_loop() with no running loop is
        # deprecated since Python 3.10 — consider asyncio.run(_resolve());
        # confirm the target Python/PTB versions before changing.
        asyncio.get_event_loop().run_until_complete(_resolve())

        logger.info("=" * 60)
        logger.info(" Aetheel Telegram Adapter")
        logger.info("=" * 60)
        logger.info(f" Bot: @{self._bot_username}")
        logger.info(f" Mode: Polling")
        logger.info(f" Handlers: {len(self._message_handlers)} registered")
        logger.info("=" * 60)

        self._running = True
        # Long-polling loop; blocks until the application is stopped.
        self._application.run_polling(drop_pending_updates=True)

    def start_async(self) -> None:
        """Start the adapter in a background thread (non-blocking)."""
        # NOTE(review): run_polling() installs signal handlers by default,
        # which only works in the main thread — verify this runs with
        # stop_signals disabled or tolerate the resulting error.
        self._thread = threading.Thread(
            target=self.start, daemon=True, name="telegram-adapter"
        )
        self._thread.start()
        logger.info("Telegram adapter started in background thread")

    def stop(self) -> None:
        """Stop the Telegram adapter gracefully."""
        self._running = False
        if self._application:
            try:
                # NOTE(review): Application.stop() is a coroutine in
                # python-telegram-bot v20+; calling it without awaiting may
                # be a no-op — confirm the installed library version.
                self._application.stop()
            except Exception:
                pass
        logger.info("Telegram adapter stopped.")

    def send_message(
        self,
        channel_id: str,
        text: str,
        thread_id: str | None = None,
    ) -> None:
        """
        Send a message to a Telegram chat.

        Args:
            channel_id: Telegram chat ID (numeric, as a string)
            text: Message text; sent as Markdown, falling back to plain text
            thread_id: Optional message ID to reply to

        Empty text or an unstarted adapter is a silent no-op.
        """
        if not text.strip() or not self._application:
            return

        # Chunk long messages (Telegram limit = 4096 chars)
        TELEGRAM_TEXT_LIMIT = 4096
        chunks = _chunk_text(text, TELEGRAM_TEXT_LIMIT)

        async def _send():
            bot = self._application.bot
            for chunk in chunks:
                kwargs = {
                    "chat_id": int(channel_id),
                    "text": chunk,
                    "parse_mode": "Markdown",
                }
                if thread_id:
                    kwargs["reply_to_message_id"] = int(thread_id)
                try:
                    await bot.send_message(**kwargs)
                except Exception as e:
                    # Retry without parse_mode if Markdown fails
                    logger.debug(f"Markdown send failed, retrying plain: {e}")
                    kwargs.pop("parse_mode", None)
                    await bot.send_message(**kwargs)

        # Inside the bot's event loop: schedule the send; otherwise spin up
        # a temporary loop and run it to completion.
        try:
            loop = asyncio.get_running_loop()
            loop.create_task(_send())
        except RuntimeError:
            asyncio.run(_send())

    # -------------------------------------------------------------------
    # Internal: Message handling
    # -------------------------------------------------------------------

    async def _handle_message(self, update, context) -> None:
        """
        Process an incoming Telegram message.

        Converts the update into a channel-agnostic IncomingMessage and
        dispatches it to the registered handlers via BaseAdapter._dispatch.
        """
        message = update.effective_message
        if not message or not message.text:
            return

        chat = update.effective_chat
        user = update.effective_user
        if not chat or not user:
            return

        is_private = chat.type == "private"
        text = message.text

        # In groups, only respond to @mentions
        if not is_private:
            if self._bot_username and f"@{self._bot_username}" in text:
                text = text.replace(f"@{self._bot_username}", "").strip()
            else:
                return  # Ignore non-mention messages in groups

        if not text.strip():
            return

        # Build IncomingMessage
        user_name = user.full_name or user.username or str(user.id)
        channel_name = chat.title or (
            f"DM with {user_name}" if is_private else str(chat.id)
        )

        msg = IncomingMessage(
            text=text,
            user_id=str(user.id),
            user_name=user_name,
            channel_id=str(chat.id),
            channel_name=channel_name,
            conversation_id=str(chat.id),  # Telegram: one session per chat
            source="telegram",
            is_dm=is_private,
            timestamp=datetime.fromtimestamp(
                message.date.timestamp(), tz=timezone.utc
            )
            if message.date
            else datetime.now(timezone.utc),
            raw_event={
                # In groups, reply to the triggering message; DMs get no thread.
                "thread_id": str(message.message_id) if not is_private else None,
                "message_id": message.message_id,
                "chat_type": chat.type,
            },
        )

        logger.info(
            f"📨 [TG] Message from {user_name} in {channel_name}: {text[:100]}"
        )

        # Dispatch to handlers (synchronous — handlers are sync functions)
        # NOTE(review): handlers run inline here and block the event loop
        # while they execute — confirm handler latency is acceptable.
        self._dispatch(msg)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _chunk_text(text: str, limit: int = 4096) -> list[str]:
|
||||
"""Split text into chunks respecting Telegram's character limit."""
|
||||
if len(text) <= limit:
|
||||
return [text]
|
||||
|
||||
chunks = []
|
||||
remaining = text
|
||||
while remaining:
|
||||
if len(remaining) <= limit:
|
||||
chunks.append(remaining)
|
||||
break
|
||||
|
||||
cut = limit
|
||||
newline_pos = remaining.rfind("\n", 0, limit)
|
||||
if newline_pos > limit // 2:
|
||||
cut = newline_pos + 1
|
||||
else:
|
||||
space_pos = remaining.rfind(" ", 0, limit)
|
||||
if space_pos > limit // 2:
|
||||
cut = space_pos + 1
|
||||
|
||||
chunks.append(remaining[:cut])
|
||||
remaining = remaining[cut:]
|
||||
|
||||
return chunks
|
||||
294
agent/subagent.py
Normal file
294
agent/subagent.py
Normal file
@@ -0,0 +1,294 @@
|
||||
"""
|
||||
Aetheel Subagent Manager
|
||||
=========================
|
||||
Spawns background AI agent sessions for long-running tasks.
|
||||
|
||||
The main agent can "spawn" a subagent by including an action tag in its
|
||||
response. The subagent runs in a background thread with its own runtime
|
||||
session and sends results back to the originating channel when done.
|
||||
|
||||
Usage:
|
||||
from agent.subagent import SubagentManager
|
||||
|
||||
manager = SubagentManager(runtime_factory=make_runtime, send_fn=send_message)
|
||||
manager.spawn(
|
||||
task="Research Python 3.14 features",
|
||||
channel_id="C123",
|
||||
channel_type="slack",
|
||||
)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Callable
|
||||
|
||||
logger = logging.getLogger("aetheel.subagent")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class SubagentTask:
    """A running or completed subagent task."""

    id: str  # short unique ID (first 8 hex chars of a uuid4)
    task: str  # The task/prompt given to the subagent
    channel_id: str  # where results are delivered
    channel_type: str  # "slack", "telegram", etc.
    thread_id: str | None = None  # optional thread/reply target
    user_name: str | None = None  # who requested the task, if known
    status: str = "pending"  # pending, running, done, failed
    result: str | None = None  # final output on success
    error: str | None = None  # error description on failure
    # ISO-8601 UTC timestamp of when the task record was created.
    created_at: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )
    duration_ms: int = 0  # runtime in milliseconds
|
||||
|
||||
|
||||
# Type aliases
RuntimeFactory = Callable[[], Any]  # Creates a fresh runtime instance
# Called as: send_fn(channel_id, text, thread_id, channel_type)
SendFunction = Callable[[str, str, str | None, str], None]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Subagent Manager
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class SubagentManager:
|
||||
"""
|
||||
Manages background subagent tasks.
|
||||
|
||||
Each subagent runs in its own thread with a fresh runtime instance.
|
||||
When complete, it sends results back to the originating channel
|
||||
via the send function.
|
||||
"""
|
||||
|
||||
    def __init__(
        self,
        runtime_factory: RuntimeFactory,
        send_fn: SendFunction,
        max_concurrent: int = 3,
    ):
        """
        Args:
            runtime_factory: Zero-arg callable producing a fresh runtime
                instance for each subagent.
            send_fn: Delivery callback, called as
                send_fn(channel_id, text, thread_id, channel_type).
            max_concurrent: Soft cap on simultaneously active
                (pending/running) subagents.
        """
        self._runtime_factory = runtime_factory
        self._send_fn = send_fn
        self._max_concurrent = max_concurrent
        self._tasks: dict[str, SubagentTask] = {}  # task_id -> task record
        self._lock = threading.Lock()  # guards _tasks
|
||||
|
||||
def spawn(
|
||||
self,
|
||||
*,
|
||||
task: str,
|
||||
channel_id: str,
|
||||
channel_type: str = "slack",
|
||||
thread_id: str | None = None,
|
||||
user_name: str | None = None,
|
||||
context: str | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Spawn a background subagent to work on a task.
|
||||
|
||||
Returns the subagent ID immediately. The subagent runs in a
|
||||
background thread and sends results back when done.
|
||||
"""
|
||||
# Check concurrent limit
|
||||
active = self._count_active()
|
||||
if active >= self._max_concurrent:
|
||||
logger.warning(
|
||||
f"Max concurrent subagents reached ({self._max_concurrent}). "
|
||||
f"Rejecting task: {task[:50]}"
|
||||
)
|
||||
raise RuntimeError(
|
||||
f"Too many active subagents ({active}/{self._max_concurrent}). "
|
||||
"Wait for one to finish."
|
||||
)
|
||||
|
||||
task_id = uuid.uuid4().hex[:8]
|
||||
subagent_task = SubagentTask(
|
||||
id=task_id,
|
||||
task=task,
|
||||
channel_id=channel_id,
|
||||
channel_type=channel_type,
|
||||
thread_id=thread_id,
|
||||
user_name=user_name,
|
||||
)
|
||||
|
||||
with self._lock:
|
||||
self._tasks[task_id] = subagent_task
|
||||
|
||||
# Launch in background thread
|
||||
thread = threading.Thread(
|
||||
target=self._run_subagent,
|
||||
args=(task_id, context),
|
||||
daemon=True,
|
||||
name=f"subagent-{task_id}",
|
||||
)
|
||||
thread.start()
|
||||
|
||||
logger.info(
|
||||
f"🚀 Subagent spawned: {task_id} — '{task[:50]}' "
|
||||
f"(channel={channel_type}/{channel_id})"
|
||||
)
|
||||
return task_id
|
||||
|
||||
def list_active(self) -> list[SubagentTask]:
|
||||
"""List all active (running/pending) subagent tasks."""
|
||||
with self._lock:
|
||||
return [
|
||||
t
|
||||
for t in self._tasks.values()
|
||||
if t.status in ("pending", "running")
|
||||
]
|
||||
|
||||
def list_all(self) -> list[SubagentTask]:
|
||||
"""List all subagent tasks (including completed)."""
|
||||
with self._lock:
|
||||
return list(self._tasks.values())
|
||||
|
||||
def cancel(self, task_id: str) -> bool:
|
||||
"""
|
||||
Mark a subagent task as cancelled.
|
||||
Note: This doesn't kill the thread (subprocess may still finish),
|
||||
but prevents the result from being sent back.
|
||||
"""
|
||||
with self._lock:
|
||||
task = self._tasks.get(task_id)
|
||||
if task and task.status in ("pending", "running"):
|
||||
task.status = "cancelled"
|
||||
logger.info(f"Subagent cancelled: {task_id}")
|
||||
return True
|
||||
return False
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Internal
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
def _count_active(self) -> int:
|
||||
with self._lock:
|
||||
return sum(
|
||||
1
|
||||
for t in self._tasks.values()
|
||||
if t.status in ("pending", "running")
|
||||
)
|
||||
|
||||
def _run_subagent(self, task_id: str, context: str | None) -> None:
|
||||
"""Background thread that runs a subagent session."""
|
||||
with self._lock:
|
||||
task = self._tasks.get(task_id)
|
||||
if not task:
|
||||
return
|
||||
task.status = "running"
|
||||
|
||||
started = time.time()
|
||||
|
||||
try:
|
||||
# Lazy import to avoid circular dependency
|
||||
from agent.opencode_runtime import build_aetheel_system_prompt
|
||||
|
||||
# Create a fresh runtime instance
|
||||
runtime = self._runtime_factory()
|
||||
|
||||
# Build system prompt for the subagent
|
||||
system_prompt = build_aetheel_system_prompt(
|
||||
user_name=task.user_name,
|
||||
extra_context=(
|
||||
f"# Subagent Context\n\n"
|
||||
f"You are a background subagent running task: {task.task}\n"
|
||||
f"Complete the task and provide your findings.\n"
|
||||
+ (f"\n{context}" if context else "")
|
||||
),
|
||||
)
|
||||
|
||||
# Run the task through the runtime
|
||||
response = runtime.chat(
|
||||
message=task.task,
|
||||
conversation_id=f"subagent-{task_id}",
|
||||
system_prompt=system_prompt,
|
||||
)
|
||||
|
||||
duration_ms = int((time.time() - started) * 1000)
|
||||
|
||||
with self._lock:
|
||||
current = self._tasks.get(task_id)
|
||||
if not current or current.status == "cancelled":
|
||||
return
|
||||
current.duration_ms = duration_ms
|
||||
|
||||
if response.ok:
|
||||
with self._lock:
|
||||
current = self._tasks.get(task_id)
|
||||
if current:
|
||||
current.status = "done"
|
||||
current.result = response.text
|
||||
|
||||
# Send result back to the originating channel
|
||||
result_msg = (
|
||||
f"🤖 *Subagent Complete* (task `{task_id}`)\n\n"
|
||||
f"**Task:** {task.task[:200]}\n\n"
|
||||
f"{response.text}"
|
||||
)
|
||||
|
||||
try:
|
||||
self._send_fn(
|
||||
task.channel_id,
|
||||
result_msg,
|
||||
task.thread_id,
|
||||
task.channel_type,
|
||||
)
|
||||
logger.info(
|
||||
f"✅ Subagent {task_id} complete ({duration_ms}ms)"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to send subagent result: {e}", exc_info=True
|
||||
)
|
||||
else:
|
||||
with self._lock:
|
||||
current = self._tasks.get(task_id)
|
||||
if current:
|
||||
current.status = "failed"
|
||||
current.error = response.error
|
||||
|
||||
# Notify of failure
|
||||
error_msg = (
|
||||
f"⚠️ *Subagent Failed* (task `{task_id}`)\n\n"
|
||||
f"**Task:** {task.task[:200]}\n\n"
|
||||
f"Error: {response.error or 'Unknown error'}"
|
||||
)
|
||||
|
||||
try:
|
||||
self._send_fn(
|
||||
task.channel_id,
|
||||
error_msg,
|
||||
task.thread_id,
|
||||
task.channel_type,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
logger.warning(
|
||||
f"❌ Subagent {task_id} failed: {response.error}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
duration_ms = int((time.time() - started) * 1000)
|
||||
with self._lock:
|
||||
current = self._tasks.get(task_id)
|
||||
if current:
|
||||
current.status = "failed"
|
||||
current.error = str(e)
|
||||
current.duration_ms = duration_ms
|
||||
|
||||
logger.error(
|
||||
f"❌ Subagent {task_id} crashed: {e}", exc_info=True
|
||||
)
|
||||
3
docs/additions.txt
Normal file
3
docs/additions.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
config instead of env
|
||||
edit its own files and config as well as add skills
|
||||
install script starts server and adds the aetheel command
|
||||
243
docs/comparison.md
Normal file
243
docs/comparison.md
Normal file
@@ -0,0 +1,243 @@
|
||||
# ⚔️ Aetheel vs. Inspiration Repos — Comparison & Missing Features
|
||||
|
||||
> A detailed comparison of Aetheel with Nanobot, NanoClaw, OpenClaw, and PicoClaw — highlighting what's different, what's missing, and what can be added.
|
||||
|
||||
---
|
||||
|
||||
## Feature Comparison Matrix
|
||||
|
||||
| Feature | Aetheel | Nanobot | NanoClaw | OpenClaw | PicoClaw |
|
||||
|---------|---------|---------|----------|----------|----------|
|
||||
| **Language** | Python | Python | TypeScript | TypeScript | Go |
|
||||
| **Channels** | Slack only | 9 channels | WhatsApp only | 15+ channels | 5 channels |
|
||||
| **LLM Runtime** | OpenCode / Claude Code (subprocess) | LiteLLM (multi-provider) | Claude Agent SDK | Pi Agent (custom RPC) | Go-native agent |
|
||||
| **Memory** | Hybrid (vector + BM25) | Simple file-based | Per-group CLAUDE.md | Workspace files | MEMORY.md + sessions |
|
||||
| **Config** | `.env` file | `config.json` | Code changes (no config) | JSON5 config | `config.json` |
|
||||
| **Skills** | ❌ None | ✅ Bundled + custom | ✅ Code skills (transform) | ✅ Bundled + managed + workspace | ✅ Custom skills |
|
||||
| **Scheduled Tasks** | ⚠️ Action tags (remind only) | ✅ Full cron system | ✅ Task scheduler | ✅ Cron + webhooks + Gmail | ✅ Cron + heartbeat |
|
||||
| **Security** | ❌ No sandbox | ⚠️ Workspace restriction | ✅ Container isolation | ✅ Docker sandbox + pairing | ✅ Workspace sandbox |
|
||||
| **MCP Support** | ❌ No | ✅ Yes | ❌ No | ❌ No | ❌ No |
|
||||
| **Web Search** | ❌ No | ✅ Brave Search | ✅ Via Claude tools | ✅ Browser control | ✅ Brave + DuckDuckGo |
|
||||
| **Voice** | ❌ No | ✅ Via Groq Whisper | ❌ No | ✅ Voice Wake + Talk Mode | ✅ Via Groq Whisper |
|
||||
| **Browser Control** | ❌ No | ❌ No | ❌ No | ✅ Full CDP control | ❌ No |
|
||||
| **Companion Apps** | ❌ No | ❌ No | ❌ No | ✅ macOS + iOS + Android | ❌ No |
|
||||
| **Session Management** | ✅ Thread-based (Slack) | ✅ Session-based | ✅ Per-group isolated | ✅ Full sessions + agent-to-agent | ✅ Session-based |
|
||||
| **Docker Support** | ❌ No | ✅ Yes | ❌ (uses Apple Container) | ✅ Full compose setup | ✅ Yes |
|
||||
| **Install Script** | ✅ Yes | ✅ pip/uv install | ✅ Claude guides setup | ✅ npm + wizard | ✅ Binary / make |
|
||||
| **Identity Files** | ✅ SOUL.md, USER.md, MEMORY.md | ✅ AGENTS.md, SOUL.md, USER.md, etc. | ✅ CLAUDE.md per group | ✅ AGENTS.md, SOUL.md, USER.md, TOOLS.md | ✅ Full set (AGENTS, SOUL, IDENTITY, USER, TOOLS) |
|
||||
| **Subagents** | ❌ No | ✅ Spawn subagent | ✅ Agent Swarms | ✅ sessions_send / sessions_spawn | ✅ Spawn subagent |
|
||||
| **Heartbeat/Proactive** | ❌ No | ✅ Heartbeat | ❌ No | ✅ Cron + wakeups | ✅ HEARTBEAT.md |
|
||||
| **Multi-provider** | ⚠️ Via OpenCode/Claude | ✅ 12+ providers | ❌ Claude only | ✅ Multi-model + failover | ✅ 7+ providers |
|
||||
| **WebChat** | ❌ No | ❌ No | ❌ No | ✅ Built-in WebChat | ❌ No |
|
||||
|
||||
---
|
||||
|
||||
## What Aetheel Does Well
|
||||
|
||||
### ✅ Strengths
|
||||
|
||||
1. **Advanced Memory System** — Aetheel has the most sophisticated memory system with **hybrid search (0.7 vector + 0.3 BM25)**, local embeddings via `fastembed`, and SQLite FTS5. None of the other repos have this level of memory sophistication.
|
||||
|
||||
2. **Local-First Embeddings** — Zero API calls for memory search. Uses ONNX-based local model (BAAI/bge-small-en-v1.5).
|
||||
|
||||
3. **Dual Runtime Support** — Clean abstraction allowing switching between OpenCode and Claude Code with the same `AgentResponse` interface.
|
||||
|
||||
4. **Thread Isolation in Slack** — Each Slack thread gets its own AI session, providing natural conversation isolation.
|
||||
|
||||
5. **Action Tags** — Inline `[ACTION:remind|minutes|message]` tags are elegant for in-response scheduling.
|
||||
|
||||
6. **File Watching** — Memory auto-reindexes when `.md` files are edited.
|
||||
|
||||
---
|
||||
|
||||
## What Aetheel Is Missing
|
||||
|
||||
### 🔴 Critical Gaps (High Priority)
|
||||
|
||||
#### 1. Multi-Channel Support
|
||||
**Current:** Slack only
|
||||
**All others:** Multiple channels (3-15+)
|
||||
|
||||
Aetheel is locked to Slack. Adding at least **Telegram** and **Discord** would significantly increase usability. All four inspiration repos treat multi-channel as essential.
|
||||
|
||||
> **Recommendation:** Follow Nanobot's pattern — each channel is a module in `channels/` with a common interface. Start with Telegram (easiest — just a token).
|
||||
|
||||
#### 2. Skills System
|
||||
**Current:** None
|
||||
**Others:** All have skills/plugins
|
||||
|
||||
Aetheel has no way to extend agent capabilities beyond its hardcoded memory and runtime setup. A skills system would allow:
|
||||
- Bundled skills (GitHub, weather, web search)
|
||||
- User-created skills in workspace
|
||||
- Community-contributed skills
|
||||
|
||||
> **Recommendation:** Create a `skills/` directory in the workspace. Skills are markdown files (`SKILL.md`) that get injected into the agent's context.
|
||||
|
||||
#### 3. Scheduled Tasks (Cron)
|
||||
**Current:** Only `[ACTION:remind]` (one-time, simple)
|
||||
**Others:** Full cron systems with persistent storage
|
||||
|
||||
The action tag system is clever but limited. A proper cron system would support:
|
||||
- Recurring cron expressions (`0 9 * * *`)
|
||||
- Interval-based scheduling
|
||||
- Persistent job storage
|
||||
- CLI management
|
||||
|
||||
> **Recommendation:** Add a `cron/` module with SQLite-backed job storage and an APScheduler-based execution engine.
|
||||
|
||||
#### 4. Security Sandbox
|
||||
**Current:** No sandboxing
|
||||
**Others:** Container isolation (NanoClaw), workspace restriction (PicoClaw), Docker sandbox (OpenClaw)
|
||||
|
||||
The AI runtime has unrestricted system access. At minimum, workspace-level restrictions should be added.
|
||||
|
||||
> **Recommendation:** Follow PicoClaw's approach — restrict tool access to workspace directory by default. Block dangerous shell commands.
|
||||
|
||||
---
|
||||
|
||||
### 🟡 Important Gaps (Medium Priority)
|
||||
|
||||
#### 5. Config File System (JSON instead of .env)
|
||||
**Current:** `.env` file with environment variables
|
||||
**Others:** JSON/JSON5 config files
|
||||
|
||||
A structured config file is more flexible and easier to manage than flat env vars. It can hold nested structures for channels, providers, tools, etc.
|
||||
|
||||
> **Recommendation:** Switch to `~/.aetheel/config.json` with a schema validator. Keep `.env` for secrets only.
|
||||
|
||||
#### 6. Web Search Tool
|
||||
**Current:** No web search
|
||||
**Others:** Brave Search, DuckDuckGo, or full browser control
|
||||
|
||||
The agent can't search the web. This is a significant limitation for a personal assistant.
|
||||
|
||||
> **Recommendation:** Add Brave Search API integration (free tier: 2000 queries/month) with DuckDuckGo as fallback.
|
||||
|
||||
#### 7. Subagent / Spawn Capability
|
||||
**Current:** No subagents
|
||||
**Others:** All have spawn/subagent systems
|
||||
|
||||
For long-running tasks, the main agent should be able to spawn background sub-tasks that work independently and report back.
|
||||
|
||||
> **Recommendation:** Add a `spawn` tool that creates a background thread/process running a separate agent session.
|
||||
|
||||
#### 8. Heartbeat / Proactive System
|
||||
**Current:** No proactive capabilities
|
||||
**Others:** Nanobot and PicoClaw have heartbeat systems
|
||||
|
||||
The agent only responds to messages. A heartbeat system would allow periodic check-ins, proactive notifications, and scheduled intelligence.
|
||||
|
||||
> **Recommendation:** Add `HEARTBEAT.md` file + periodic timer that triggers agent with heartbeat tasks.
|
||||
|
||||
#### 9. CLI Interface
|
||||
**Current:** Only `python main.py` with flags
|
||||
**Others:** Full CLI with subcommands (`nanobot agent`, `picoclaw cron`, etc.)
|
||||
|
||||
> **Recommendation:** Add a CLI using `click` or `argparse` with subcommands: `aetheel chat`, `aetheel status`, `aetheel cron`, etc.
|
||||
|
||||
#### 10. Tool System
|
||||
**Current:** No explicit tool system (AI handles everything via runtime)
|
||||
**Others:** Shell exec, file R/W, web search, spawn, message, etc.
|
||||
|
||||
Aetheel delegates all tool use to the AI runtime (OpenCode/Claude Code). While this works, having explicit tools gives more control and allows sandboxing.
|
||||
|
||||
> **Recommendation:** Define a tool interface and implement core tools (file ops, shell, web search) that run through the Aetheel process with sandboxing.
|
||||
|
||||
---
|
||||
|
||||
### 🟢 Nice-to-Have (Lower Priority)
|
||||
|
||||
#### 11. MCP Server Support
|
||||
Only Nanobot supports MCP. Would allow connecting external tool servers.
|
||||
|
||||
#### 12. Multi-Provider Support
|
||||
Currently relies on OpenCode/Claude Code for provider handling. Direct multi-provider support (like Nanobot's 12+ providers via LiteLLM) would add flexibility.
|
||||
|
||||
#### 13. Docker / Container Support
|
||||
No Docker compose or containerized deployment option.
|
||||
|
||||
#### 14. Agent-to-Agent Communication
|
||||
OpenClaw's `sessions_send` allows agents to message each other. Useful for multi-agent workflows.
|
||||
|
||||
#### 15. Gateway Architecture
|
||||
Moving from a direct Slack adapter to a gateway pattern would make adding channels much easier.
|
||||
|
||||
#### 16. Onboarding Wizard
|
||||
OpenClaw's `onboard --install-daemon` provides a guided setup. Aetheel's install script is good but could be more interactive.
|
||||
|
||||
#### 17. Voice Support
|
||||
Voice Wake / Talk Mode (OpenClaw) or Whisper transcription (Nanobot, PicoClaw).
|
||||
|
||||
#### 18. WebChat Interface
|
||||
A browser-based chat UI connected to the gateway.
|
||||
|
||||
#### 19. TOOLS.md File
|
||||
A `TOOLS.md` file describing available tools to the agent, used by PicoClaw and OpenClaw.
|
||||
|
||||
#### 20. Self-Modification
|
||||
From `additions.txt`: "edit its own files and config as well as add skills" — the agent should be able to modify its own configuration and add new skills.
|
||||
|
||||
---
|
||||
|
||||
## Architecture Comparison
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph Aetheel["⚔️ Aetheel (Current)"]
|
||||
A_SLACK["Slack\n(only channel)"]
|
||||
A_MAIN["main.py"]
|
||||
A_MEM["Memory\n(hybrid search)"]
|
||||
A_RT["OpenCode / Claude\n(subprocess)"]
|
||||
end
|
||||
|
||||
subgraph Target["🎯 Target Architecture"]
|
||||
T_CHAN["Multi-Channel\nGateway"]
|
||||
T_CORE["Core Agent\n+ Tool System"]
|
||||
T_MEM["Memory\n(hybrid search)"]
|
||||
T_SK["Skills"]
|
||||
T_CRON["Cron"]
|
||||
T_PROV["Multi-Provider"]
|
||||
T_SEC["Security\nSandbox"]
|
||||
end
|
||||
|
||||
A_SLACK --> A_MAIN
|
||||
A_MAIN --> A_MEM
|
||||
A_MAIN --> A_RT
|
||||
|
||||
T_CHAN --> T_CORE
|
||||
T_CORE --> T_MEM
|
||||
T_CORE --> T_SK
|
||||
T_CORE --> T_CRON
|
||||
T_CORE --> T_PROV
|
||||
T_CORE --> T_SEC
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Prioritized Roadmap Suggestion
|
||||
|
||||
Based on the analysis, here's a suggested implementation order:
|
||||
|
||||
### Phase 1: Foundation (Essentials)
|
||||
1. **Config system** — Switch from `.env` to JSON config
|
||||
2. **Skills system** — `skills/` directory with `SKILL.md` loading
|
||||
3. **Tool system** — Core tools (shell, file, web search) with sandbox
|
||||
4. **Security sandbox** — Workspace-restricted tool execution
|
||||
|
||||
### Phase 2: Channels & Scheduling
|
||||
5. **Channel abstraction** — Extract adapter interface from Slack adapter
|
||||
6. **Telegram channel** — First new channel
|
||||
7. **Cron system** — Full scheduled task management
|
||||
8. **CLI** — Proper CLI with subcommands
|
||||
|
||||
### Phase 3: Advanced Features
|
||||
9. **Heartbeat** — Proactive agent capabilities
|
||||
10. **Subagents** — Spawn background tasks
|
||||
11. **Discord channel** — Second new channel
|
||||
12. **Web search** — Brave Search + DuckDuckGo
|
||||
|
||||
### Phase 4: Polish
|
||||
13. **Self-modification** — Agent can edit config and add skills
|
||||
14. **Docker support** — Dockerfile + compose
|
||||
15. **MCP support** — External tool servers
|
||||
16. **WebChat** — Browser-based chat UI
|
||||
207
docs/nanobot.md
Normal file
207
docs/nanobot.md
Normal file
@@ -0,0 +1,207 @@
|
||||
# 🐈 Nanobot — Architecture & How It Works
|
||||
|
||||
> **Ultra-Lightweight Personal AI Assistant** — ~4,000 lines of Python, 99% smaller than OpenClaw.
|
||||
|
||||
## Overview
|
||||
|
||||
Nanobot is a minimalist personal AI assistant written in Python that focuses on delivering core agent functionality with the smallest possible codebase. It uses LiteLLM for multi-provider LLM routing, supports 9+ chat channels, and includes memory, skills, scheduled tasks, and MCP tool integration.
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| **Language** | Python 3.11+ |
|
||||
| **Lines of Code** | ~4,000 (core agent) |
|
||||
| **Config** | `~/.nanobot/config.json` |
|
||||
| **Package** | `pip install nanobot-ai` |
|
||||
| **LLM Routing** | LiteLLM (multi-provider) |
|
||||
|
||||
---
|
||||
|
||||
## Architecture Flowchart
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph Channels["📱 Chat Channels"]
|
||||
TG["Telegram"]
|
||||
DC["Discord"]
|
||||
WA["WhatsApp"]
|
||||
FS["Feishu"]
|
||||
MC["Mochat"]
|
||||
DT["DingTalk"]
|
||||
SL["Slack"]
|
||||
EM["Email"]
|
||||
QQ["QQ"]
|
||||
end
|
||||
|
||||
subgraph Gateway["🌐 Gateway (nanobot gateway)"]
|
||||
CH["Channel Manager"]
|
||||
MQ["Message Queue"]
|
||||
end
|
||||
|
||||
subgraph Agent["🧠 Core Agent"]
|
||||
LOOP["Agent Loop\n(loop.py)"]
|
||||
CTX["Context Builder\n(context.py)"]
|
||||
MEM["Memory System\n(memory.py)"]
|
||||
SK["Skills Loader\n(skills.py)"]
|
||||
SA["Subagent\n(subagent.py)"]
|
||||
end
|
||||
|
||||
subgraph Tools["🔧 Built-in Tools"]
|
||||
SHELL["Shell Exec"]
|
||||
FILE["File R/W/Edit"]
|
||||
WEB["Web Search"]
|
||||
SPAWN["Spawn Subagent"]
|
||||
MCP["MCP Servers"]
|
||||
end
|
||||
|
||||
subgraph Providers["☁️ LLM Providers (LiteLLM)"]
|
||||
OR["OpenRouter"]
|
||||
AN["Anthropic"]
|
||||
OA["OpenAI"]
|
||||
DS["DeepSeek"]
|
||||
GR["Groq"]
|
||||
GE["Gemini"]
|
||||
VL["vLLM (local)"]
|
||||
end
|
||||
|
||||
Channels --> Gateway
|
||||
Gateway --> Agent
|
||||
CTX --> LOOP
|
||||
MEM --> CTX
|
||||
SK --> CTX
|
||||
LOOP --> Tools
|
||||
LOOP --> Providers
|
||||
SA --> LOOP
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Message Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Channel as Chat Channel
|
||||
participant GW as Gateway
|
||||
participant Agent as Agent Loop
|
||||
participant LLM as LLM Provider
|
||||
participant Tools as Tools
|
||||
|
||||
User->>Channel: Send message
|
||||
Channel->>GW: Forward message
|
||||
GW->>Agent: Route to agent
|
||||
Agent->>Agent: Build context (memory, skills, identity)
|
||||
Agent->>LLM: Send prompt + tools
|
||||
LLM-->>Agent: Response (text or tool call)
|
||||
|
||||
alt Tool Call
|
||||
Agent->>Tools: Execute tool
|
||||
Tools-->>Agent: Tool result
|
||||
Agent->>LLM: Send tool result
|
||||
LLM-->>Agent: Final response
|
||||
end
|
||||
|
||||
Agent->>Agent: Update memory
|
||||
Agent-->>GW: Return response
|
||||
GW-->>Channel: Send reply
|
||||
Channel-->>User: Display response
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Key Components
|
||||
|
||||
### 1. Agent Loop (`agent/loop.py`)
|
||||
The core loop that manages the LLM ↔ tool execution cycle:
|
||||
- Builds a prompt using context (memory, skills, identity files)
|
||||
- Sends to LLM via LiteLLM
|
||||
- If LLM returns a tool call → executes it → sends result back
|
||||
- Continues until LLM returns a text response (no more tool calls)
|
||||
|
||||
### 2. Context Builder (`agent/context.py`)
|
||||
Assembles the system prompt from:
|
||||
- **Identity files**: `AGENTS.md`, `SOUL.md`, `USER.md`, `TOOLS.md`, `IDENTITY.md`
|
||||
- **Memory**: Persistent `MEMORY.md` with recall
|
||||
- **Skills**: Loaded from `~/.nanobot/workspace/skills/`
|
||||
- **Conversation history**: Session-based context
|
||||
|
||||
### 3. Memory System (`agent/memory.py`)
|
||||
- Persistent memory stored in `MEMORY.md` in the workspace
|
||||
- Agent can read and write memories
|
||||
- Survives across sessions
|
||||
|
||||
### 4. Provider Registry (`providers/registry.py`)
|
||||
- Single-source-of-truth for all LLM providers
|
||||
- Adding a new provider = 2 steps (add `ProviderSpec` + config field)
|
||||
- Auto-prefixes model names for LiteLLM routing
|
||||
- Supports 12+ providers including local vLLM
|
||||
|
||||
### 5. Channel System (`channels/`)
|
||||
- 9 chat platforms supported (Telegram, Discord, WhatsApp, Feishu, Mochat, DingTalk, Slack, Email, QQ)
|
||||
- Each channel handles auth, message parsing, and response delivery
|
||||
- Allowlist-based security (`allowFrom`)
|
||||
- Started via `nanobot gateway`
|
||||
|
||||
### 6. Skills (`skills/`)
|
||||
- Bundled skills: GitHub, weather, tmux, etc.
|
||||
- Custom skills loaded from workspace
|
||||
- Skills are injected into the agent's context
|
||||
|
||||
### 7. Scheduled Tasks (Cron)
|
||||
- Add jobs via `nanobot cron add`
|
||||
- Supports cron expressions and interval-based scheduling
|
||||
- Jobs stored persistently
|
||||
|
||||
### 8. MCP Integration
|
||||
- Supports Model Context Protocol servers
|
||||
- Stdio and HTTP transport modes
|
||||
- Compatible with Claude Desktop / Cursor MCP configs
|
||||
- Tools auto-discovered and registered at startup
|
||||
|
||||
---
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
nanobot/
|
||||
├── agent/ # 🧠 Core agent logic
|
||||
│ ├── loop.py # Agent loop (LLM ↔ tool execution)
|
||||
│ ├── context.py # Prompt builder
|
||||
│ ├── memory.py # Persistent memory
|
||||
│ ├── skills.py # Skills loader
|
||||
│ ├── subagent.py # Background task execution
|
||||
│ └── tools/ # Built-in tools (incl. spawn)
|
||||
├── skills/ # 🎯 Bundled skills (github, weather, tmux...)
|
||||
├── channels/ # 📱 Chat channel integrations
|
||||
├── providers/ # ☁️ LLM provider registry
|
||||
├── config/ # ⚙️ Configuration schema
|
||||
├── cron/ # ⏰ Scheduled tasks
|
||||
├── heartbeat/ # 💓 Heartbeat system
|
||||
├── session/ # 📝 Session management
|
||||
├── bus/ # 📨 Internal event bus
|
||||
├── cli/ # 🖥️ CLI commands
|
||||
└── utils/ # 🔧 Utilities
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## CLI Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `nanobot onboard` | Initialize config & workspace |
|
||||
| `nanobot agent -m "..."` | Chat with the agent |
|
||||
| `nanobot agent` | Interactive chat mode |
|
||||
| `nanobot gateway` | Start all channels |
|
||||
| `nanobot status` | Show status |
|
||||
| `nanobot cron add/list/remove` | Manage scheduled tasks |
|
||||
| `nanobot channels login` | Link WhatsApp device |
|
||||
|
||||
---
|
||||
|
||||
## Key Design Decisions
|
||||
|
||||
1. **LiteLLM for provider abstraction** — One interface for all LLM providers
|
||||
2. **JSON config over env vars** — Single `config.json` file for all settings
|
||||
3. **Skills-based extensibility** — Modular skill system for adding capabilities
|
||||
4. **Provider Registry pattern** — Adding providers is 2-step, zero if-elif chains
|
||||
5. **Agent social network** — Can join MoltBook, ClawdChat communities
|
||||
214
docs/nanoclaw.md
Normal file
214
docs/nanoclaw.md
Normal file
@@ -0,0 +1,214 @@
|
||||
# 🦀 NanoClaw — Architecture & How It Works
|
||||
|
||||
> **Minimal, Security-First Personal AI Assistant** — built on Claude Agent SDK with container isolation.
|
||||
|
||||
## Overview
|
||||
|
||||
NanoClaw is a minimalist personal AI assistant that prioritizes **security through container isolation** and **understandability through small codebase size**. It runs on Claude Agent SDK (Claude Code) and uses WhatsApp as its primary channel. Each group chat runs in its own isolated Linux container.
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| **Language** | TypeScript (Node.js 20+) |
|
||||
| **Codebase Size** | ~34.9k tokens (~17% of Claude context window) |
|
||||
| **Config** | No config files — code changes only |
|
||||
| **AI Runtime** | Claude Agent SDK (Claude Code) |
|
||||
| **Primary Channel** | WhatsApp (Baileys) |
|
||||
| **Isolation** | Apple Container (macOS) / Docker (Linux) |
|
||||
|
||||
---
|
||||
|
||||
## Architecture Flowchart
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph WhatsApp["📱 WhatsApp"]
|
||||
WA["WhatsApp Client\n(Baileys)"]
|
||||
end
|
||||
|
||||
subgraph Core["🧠 Core Process (Single Node.js)"]
|
||||
IDX["Orchestrator\n(index.ts)"]
|
||||
DB["SQLite DB\n(db.ts)"]
|
||||
GQ["Group Queue\n(group-queue.ts)"]
|
||||
TS["Task Scheduler\n(task-scheduler.ts)"]
|
||||
IPC["IPC Watcher\n(ipc.ts)"]
|
||||
RT["Router\n(router.ts)"]
|
||||
end
|
||||
|
||||
subgraph Containers["🐳 Isolated Containers"]
|
||||
C1["Container 1\nGroup A\n(CLAUDE.md)"]
|
||||
C2["Container 2\nGroup B\n(CLAUDE.md)"]
|
||||
C3["Container 3\nMain Channel\n(CLAUDE.md)"]
|
||||
end
|
||||
|
||||
subgraph Memory["💾 Per-Group Memory"]
|
||||
M1["groups/A/CLAUDE.md"]
|
||||
M2["groups/B/CLAUDE.md"]
|
||||
M3["groups/main/CLAUDE.md"]
|
||||
end
|
||||
|
||||
WA --> IDX
|
||||
IDX --> DB
|
||||
IDX --> GQ
|
||||
GQ --> Containers
|
||||
TS --> Containers
|
||||
Containers --> IPC
|
||||
IPC --> RT
|
||||
RT --> WA
|
||||
C1 --- M1
|
||||
C2 --- M2
|
||||
C3 --- M3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Message Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant WA as WhatsApp (Baileys)
|
||||
participant IDX as Orchestrator
|
||||
participant DB as SQLite
|
||||
participant GQ as Group Queue
|
||||
participant Container as Container (Claude SDK)
|
||||
participant IPC as IPC Watcher
|
||||
|
||||
User->>WA: Send message with @Andy
|
||||
WA->>IDX: New message event
|
||||
IDX->>DB: Store message
|
||||
IDX->>GQ: Enqueue (per-group, concurrency limited)
|
||||
GQ->>Container: Spawn Claude agent container
|
||||
Note over Container: Mounts only group's filesystem
|
||||
Note over Container: Reads group-specific CLAUDE.md
|
||||
Container->>Container: Claude processes with tools
|
||||
Container->>IPC: Write response to filesystem
|
||||
IPC->>IDX: Detect new response file
|
||||
IDX->>WA: Send reply
|
||||
WA->>User: Display response
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Key Components
|
||||
|
||||
### 1. Orchestrator (`src/index.ts`)
|
||||
The single entry point that manages:
|
||||
- WhatsApp connection state
|
||||
- Message polling loop
|
||||
- Agent invocation decisions
|
||||
- State management for groups and sessions
|
||||
|
||||
### 2. WhatsApp Channel (`src/channels/whatsapp.ts`)
|
||||
- Uses **Baileys** library for WhatsApp Web connection
|
||||
- Handles authentication via QR code scan
|
||||
- Manages send/receive of messages
|
||||
- Supports media messages
|
||||
|
||||
### 3. Container Runner (`src/container-runner.ts`)
|
||||
The security core of NanoClaw:
|
||||
- Spawns **streaming Claude Agent SDK** containers
|
||||
- Each group runs in its own Linux container
|
||||
- **Apple Container** on macOS, **Docker** on Linux
|
||||
- Only explicitly mounted directories are accessible
|
||||
- Bash commands run INSIDE the container, not on host
|
||||
|
||||
### 4. SQLite Database (`src/db.ts`)
|
||||
- Stores messages, groups, sessions, and state
|
||||
- Per-group message history
|
||||
- Session continuity tracking
|
||||
|
||||
### 5. Group Queue (`src/group-queue.ts`)
|
||||
- Per-group message queue
|
||||
- Global concurrency limit
|
||||
- Ensures one agent invocation per group at a time
|
||||
|
||||
### 6. IPC System (`src/ipc.ts`)
|
||||
- Filesystem-based inter-process communication
|
||||
- Container writes response to mounted directory
|
||||
- IPC watcher detects and processes response files
|
||||
- Handles task results from scheduled jobs
|
||||
|
||||
### 7. Task Scheduler (`src/task-scheduler.ts`)
|
||||
- Recurring jobs that run Claude in containers
|
||||
- Jobs can message the user back
|
||||
- Managed from the main channel (self-chat)
|
||||
|
||||
### 8. Router (`src/router.ts`)
|
||||
- Formats outbound messages
|
||||
- Routes responses to correct WhatsApp recipient
|
||||
|
||||
### 9. Per-Group Memory (`groups/*/CLAUDE.md`)
|
||||
- Each group has its own `CLAUDE.md` memory file
|
||||
- Mounted into the group's container
|
||||
- Complete filesystem isolation between groups
|
||||
|
||||
---
|
||||
|
||||
## Security Model
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph Host["🖥️ Host System"]
|
||||
NanoClaw["NanoClaw Process"]
|
||||
end
|
||||
|
||||
subgraph Container1["🐳 Container (Group A)"]
|
||||
Agent1["Claude Agent"]
|
||||
FS1["Mounted: groups/A/"]
|
||||
end
|
||||
|
||||
subgraph Container2["🐳 Container (Group B)"]
|
||||
Agent2["Claude Agent"]
|
||||
FS2["Mounted: groups/B/"]
|
||||
end
|
||||
|
||||
NanoClaw -->|"Spawns"| Container1
|
||||
NanoClaw -->|"Spawns"| Container2
|
||||
|
||||
style Container1 fill:#e8f5e9
|
||||
style Container2 fill:#e8f5e9
|
||||
```
|
||||
|
||||
- **OS-level isolation** vs. application-level permission checks
|
||||
- Agents can only see what's explicitly mounted
|
||||
- Bash commands run in container, not on host
|
||||
- No shared memory between groups
|
||||
|
||||
---
|
||||
|
||||
## Philosophy & Design Decisions
|
||||
|
||||
1. **Small enough to understand** — Read the entire codebase in ~8 minutes
|
||||
2. **Secure by isolation** — Linux containers, not permission checks
|
||||
3. **Built for one user** — Not a framework, working software for personal use
|
||||
4. **Customization = code changes** — No config sprawl, modify the code directly
|
||||
5. **AI-native** — Claude Code handles setup (`/setup`), debugging, customization
|
||||
6. **Skills over features** — Don't add features to codebase, add skills that transform forks
|
||||
7. **Best harness, best model** — Claude Agent SDK gives Claude Code superpowers
|
||||
|
||||
---
|
||||
|
||||
## Agent Swarms (Unique Feature)
|
||||
|
||||
NanoClaw is the **first personal AI assistant** to support Agent Swarms:
|
||||
- Spin up teams of specialized agents
|
||||
- Agents collaborate within your chat
|
||||
- Each agent runs in its own container
|
||||
|
||||
---
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Setup (Claude Code handles everything)
|
||||
git clone https://github.com/gavrielc/nanoclaw.git
|
||||
cd nanoclaw
|
||||
claude
|
||||
# Then run /setup
|
||||
|
||||
# Talk to your assistant
|
||||
@Andy send me a daily summary every morning at 9am
|
||||
@Andy review the git history and update the README
|
||||
```
|
||||
|
||||
Trigger word: `@Andy` (customizable via code changes)
|
||||
291
docs/openclaw.md
Normal file
291
docs/openclaw.md
Normal file
@@ -0,0 +1,291 @@
|
||||
# 🦞 OpenClaw — Architecture & How It Works
|
||||
|
||||
> **Full-Featured Personal AI Assistant** — Massive TypeScript codebase with 15+ channels, companion apps, and enterprise-grade features.
|
||||
|
||||
## Overview
|
||||
|
||||
OpenClaw is the most feature-complete personal AI assistant in this space. It's a TypeScript monorepo with a WebSocket-based Gateway as the control plane, supporting 15+ messaging channels, companion macOS/iOS/Android apps, browser control, live canvas, voice wake, and extensive automation.
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| **Language** | TypeScript (Node.js ≥22) |
|
||||
| **Codebase Size** | 430k+ lines, 50+ source modules |
|
||||
| **Config** | `~/.openclaw/openclaw.json` (JSON5) |
|
||||
| **AI Runtime** | Pi Agent (custom RPC), multi-model |
|
||||
| **Channels** | 15+ (WhatsApp, Telegram, Slack, Discord, Signal, iMessage, Teams, Matrix, Zalo, WebChat, etc.) |
|
||||
| **Package Mgr** | pnpm (monorepo) |
|
||||
|
||||
---
|
||||
|
||||
## Architecture Flowchart
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph Channels["📱 Messaging Channels (15+)"]
|
||||
WA["WhatsApp\n(Baileys)"]
|
||||
TG["Telegram\n(grammY)"]
|
||||
SL["Slack\n(Bolt)"]
|
||||
DC["Discord\n(discord.js)"]
|
||||
GC["Google Chat"]
|
||||
SIG["Signal\n(signal-cli)"]
|
||||
BB["BlueBubbles\n(iMessage)"]
|
||||
IM["iMessage\n(legacy)"]
|
||||
MST["MS Teams"]
|
||||
MTX["Matrix"]
|
||||
ZL["Zalo"]
|
||||
WC["WebChat"]
|
||||
end
|
||||
|
||||
subgraph Gateway["🌐 Gateway (Control Plane)"]
|
||||
WS["WebSocket Server\nws://127.0.0.1:18789"]
|
||||
SES["Session Manager"]
|
||||
RTE["Channel Router"]
|
||||
PRES["Presence System"]
|
||||
Q["Message Queue"]
|
||||
CFG["Config Manager"]
|
||||
AUTH["Auth / Pairing"]
|
||||
end
|
||||
|
||||
subgraph Agent["🧠 Pi Agent (RPC)"]
|
||||
AGENT["Agent Runtime"]
|
||||
TOOLS["Tool Registry"]
|
||||
STREAM["Block Streaming"]
|
||||
PROV["Provider Router\n(multi-model)"]
|
||||
end
|
||||
|
||||
subgraph Apps["📲 Companion Apps"]
|
||||
MAC["macOS Menu Bar"]
|
||||
IOS["iOS Node"]
|
||||
ANDR["Android Node"]
|
||||
end
|
||||
|
||||
subgraph ToolSet["🔧 Tools & Automation"]
|
||||
BROWSER["Browser Control\n(CDP/Chromium)"]
|
||||
CANVAS["Live Canvas\n(A2UI)"]
|
||||
CRON["Cron Jobs"]
|
||||
WEBHOOK["Webhooks"]
|
||||
GMAIL["Gmail Pub/Sub"]
|
||||
NODES["Nodes\n(camera, screen, location)"]
|
||||
SKILLS_T["Skills Platform"]
|
||||
SESS_T["Session Tools\n(agent-to-agent)"]
|
||||
end
|
||||
|
||||
subgraph Workspace["💾 Workspace"]
|
||||
AGENTS_MD["AGENTS.md"]
|
||||
SOUL_MD["SOUL.md"]
|
||||
USER_MD["USER.md"]
|
||||
TOOLS_MD["TOOLS.md"]
|
||||
SKILLS_W["Skills/"]
|
||||
end
|
||||
|
||||
Channels --> Gateway
|
||||
Apps --> Gateway
|
||||
Gateway --> Agent
|
||||
Agent --> ToolSet
|
||||
Agent --> Workspace
|
||||
Agent --> PROV
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Message Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Channel as Channel (WA/TG/Slack/etc.)
|
||||
participant GW as Gateway (WS)
|
||||
participant Session as Session Manager
|
||||
participant Agent as Pi Agent (RPC)
|
||||
participant LLM as LLM Provider
|
||||
participant Tools as Tools
|
||||
|
||||
User->>Channel: Send message
|
||||
Channel->>GW: Forward via channel adapter
|
||||
GW->>Session: Route to session (main/group)
|
||||
GW->>GW: Check auth (pairing/allowlist)
|
||||
Session->>Agent: Invoke agent (RPC)
|
||||
Agent->>Agent: Build prompt (AGENTS.md, SOUL.md, tools)
|
||||
Agent->>LLM: Stream request (with tool definitions)
|
||||
|
||||
loop Tool Use Loop
|
||||
LLM-->>Agent: Tool call (block stream)
|
||||
Agent->>Tools: Execute tool
|
||||
Tools-->>Agent: Tool result
|
||||
Agent->>LLM: Continue with result
|
||||
end
|
||||
|
||||
LLM-->>Agent: Final response (block stream)
|
||||
Agent-->>Session: Return response
|
||||
Session->>GW: Add to outbound queue
|
||||
GW->>GW: Chunk if needed (per-channel limits)
|
||||
GW->>Channel: Send chunked replies
|
||||
Channel->>User: Display response
|
||||
|
||||
Note over GW: Typing indicators, presence updates
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Key Components
|
||||
|
||||
### 1. Gateway (`src/gateway/`)
|
||||
The central control plane — everything connects through it:
|
||||
- **WebSocket server** on `ws://127.0.0.1:18789`
|
||||
- Session management (main, group, per-channel)
|
||||
- Multi-agent routing (different agents for different channels)
|
||||
- Presence tracking and typing indicators
|
||||
- Config management and hot-reload
|
||||
- Health checks, doctor diagnostics
|
||||
|
||||
### 2. Pi Agent (`src/agents/`)
|
||||
Custom RPC-based agent runtime:
|
||||
- Tool streaming and block streaming
|
||||
- Multi-model support with failover
|
||||
- Session pruning for long conversations
|
||||
- Usage tracking (tokens, cost)
|
||||
- Thinking level control (off → xhigh)
|
||||
|
||||
### 3. Channel System (`src/channels/` + per-channel dirs)
|
||||
15+ channel adapters, each with:
|
||||
- Auth handling (pairing codes, allowlists, OAuth)
|
||||
- Message format conversion
|
||||
- Media pipeline (images, audio, video)
|
||||
- Group routing with mention gating
|
||||
- Per-channel chunking (character limits differ)
|
||||
|
||||
### 4. Security System (`src/security/`)
|
||||
Multi-layered security:
|
||||
- **DM Pairing** — unknown senders receive a pairing code and must be approved before the agent responds
|
||||
- **Allowlists** — per-channel user whitelists
|
||||
- **Docker Sandbox** — non-main sessions can run in per-session Docker containers
|
||||
- **Tool denylist** — block dangerous tools in sandbox mode
|
||||
- **Elevated bash** — per-session toggle for host-level access
|
||||
|
||||
### 5. Browser Control (`src/browser/`)
|
||||
- Dedicated OpenClaw-managed Chrome/Chromium instance
|
||||
- CDP (Chrome DevTools Protocol) control
|
||||
- Snapshots, actions, uploads, profiles
|
||||
- Full web automation capabilities
|
||||
|
||||
### 6. Canvas & A2UI (`src/canvas-host/`)
|
||||
- Agent-driven visual workspace
|
||||
- A2UI (Agent-to-UI) — push HTML/JS to canvas
|
||||
- Canvas eval, snapshot, reset
|
||||
- Available on macOS, iOS, Android
|
||||
|
||||
### 7. Voice System
|
||||
- **Voice Wake** — always-on speech detection
|
||||
- **Talk Mode** — continuous conversation overlay
|
||||
- ElevenLabs TTS integration
|
||||
- Available on macOS, iOS, Android
|
||||
|
||||
### 8. Companion Apps
|
||||
- **macOS app**: Menu bar, Voice Wake/PTT, WebChat, debug tools
|
||||
- **iOS node**: Canvas, Voice Wake, Talk Mode, camera, Bonjour pairing
|
||||
- **Android node**: Canvas, Talk Mode, camera, screen recording, SMS
|
||||
|
||||
### 9. Session Tools (Agent-to-Agent)
|
||||
- `sessions_list` — discover active sessions
|
||||
- `sessions_history` — fetch transcript logs
|
||||
- `sessions_send` — message another session with reply-back
|
||||
|
||||
### 10. Skills Platform (`src/plugins/`, `skills/`)
|
||||
- **Bundled skills** — pre-installed capabilities
|
||||
- **Managed skills** — installed from ClawHub registry
|
||||
- **Workspace skills** — user-created in workspace
|
||||
- Install gating and UI
|
||||
- ClawHub registry for community skills
|
||||
|
||||
### 11. Automation
|
||||
- **Cron jobs** — scheduled recurring tasks
|
||||
- **Webhooks** — external trigger surface
|
||||
- **Gmail Pub/Sub** — email-triggered actions
|
||||
|
||||
### 12. Ops & Deployment
|
||||
- Docker support with compose
|
||||
- Tailscale Serve/Funnel for remote access
|
||||
- SSH tunnels with token/password auth
|
||||
- `openclaw doctor` for diagnostics
|
||||
- Nix mode for declarative config
|
||||
|
||||
---
|
||||
|
||||
## Project Structure (Simplified)
|
||||
|
||||
```
|
||||
openclaw/
|
||||
├── src/
|
||||
│ ├── agents/ # Pi agent runtime
|
||||
│ ├── gateway/ # WebSocket gateway
|
||||
│ ├── channels/ # Channel adapter base
|
||||
│ ├── whatsapp/ # WhatsApp adapter
|
||||
│ ├── telegram/ # Telegram adapter
|
||||
│ ├── slack/ # Slack adapter
|
||||
│ ├── discord/ # Discord adapter
|
||||
│ ├── signal/ # Signal adapter
|
||||
│ ├── imessage/ # iMessage adapters
|
||||
│ ├── browser/ # Browser control (CDP)
|
||||
│ ├── canvas-host/ # Canvas & A2UI
|
||||
│ ├── sessions/ # Session management
|
||||
│ ├── routing/ # Message routing
|
||||
│ ├── security/ # Auth, pairing, sandbox
|
||||
│ ├── cron/ # Scheduled jobs
|
||||
│ ├── memory/ # Memory system
|
||||
│ ├── providers/ # LLM providers
|
||||
│ ├── plugins/ # Plugin/skill system
|
||||
│ ├── media/ # Media pipeline
|
||||
│ ├── tts/ # Text-to-speech
|
||||
│ ├── web/ # Control UI + WebChat
|
||||
│ ├── wizard/ # Onboarding wizard
|
||||
│ └── cli/ # CLI commands
|
||||
├── apps/ # Companion app sources
|
||||
├── packages/ # Shared packages
|
||||
├── extensions/ # Extension channels
|
||||
├── skills/ # Bundled skills
|
||||
├── ui/ # Web UI source
|
||||
└── Swabble/ # macOS/iOS Swift source
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## CLI Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `openclaw onboard` | Guided setup wizard |
|
||||
| `openclaw gateway` | Start the gateway |
|
||||
| `openclaw agent --message "..."` | Chat with agent |
|
||||
| `openclaw message send` | Send to any channel |
|
||||
| `openclaw doctor` | Diagnostics & migration |
|
||||
| `openclaw pairing approve` | Approve DM pairing |
|
||||
| `openclaw update` | Update to latest version |
|
||||
| `openclaw channels login` | Link WhatsApp |
|
||||
|
||||
---
|
||||
|
||||
## Chat Commands (In-Channel)
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `/status` | Session status (model, tokens, cost) |
|
||||
| `/new` / `/reset` | Reset session |
|
||||
| `/compact` | Compact session context |
|
||||
| `/think <level>` | Set thinking level |
|
||||
| `/verbose on\|off` | Toggle verbose mode |
|
||||
| `/usage off\|tokens\|full` | Usage footer |
|
||||
| `/restart` | Restart gateway |
|
||||
| `/activation mention\|always` | Group activation mode |
|
||||
|
||||
---
|
||||
|
||||
## Key Design Decisions
|
||||
|
||||
1. **Gateway as control plane** — Single WebSocket server everything connects to
|
||||
2. **Multi-agent routing** — Different agents for different channels/groups
|
||||
3. **Pairing-based security** — Unknown DMs get pairing codes by default
|
||||
4. **Docker sandboxing** — Non-main sessions can be isolated
|
||||
5. **Block streaming** — Responses streamed as structured blocks
|
||||
6. **Extension-based channels** — MS Teams, Matrix, Zalo are extensions
|
||||
7. **Companion apps** — Native macOS/iOS/Android for device-level features
|
||||
8. **ClawHub** — Community skill registry
|
||||
251
docs/picoclaw.md
Normal file
251
docs/picoclaw.md
Normal file
@@ -0,0 +1,251 @@
|
||||
# 🦐 PicoClaw — Architecture & How It Works
|
||||
|
||||
> **Ultra-Efficient AI Assistant in Go** — $10 hardware, 10MB RAM, 1s boot time.
|
||||
|
||||
## Overview
|
||||
|
||||
PicoClaw is an extremely lightweight rewrite of NanoBot in Go, designed to run on the cheapest possible hardware — including $10 RISC-V SBCs with <10MB RAM. The project was almost entirely AI-generated (95% agent-written) via a self-bootstrapping migration from Python to Go.
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| **Language** | Go 1.21+ |
|
||||
| **RAM Usage** | <10MB |
|
||||
| **Startup Time** | <1s (even at 0.6GHz) |
|
||||
| **Hardware Cost** | As low as $10 |
|
||||
| **Architectures** | x86_64, ARM64, RISC-V |
|
||||
| **Binary** | Single self-contained binary |
|
||||
| **Config** | `~/.picoclaw/config.json` |
|
||||
|
||||
---
|
||||
|
||||
## Architecture Flowchart
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph Channels["📱 Chat Channels"]
|
||||
TG["Telegram"]
|
||||
DC["Discord"]
|
||||
QQ["QQ"]
|
||||
DT["DingTalk"]
|
||||
LINE["LINE"]
|
||||
end
|
||||
|
||||
subgraph Core["🧠 Core Agent (Single Binary)"]
|
||||
MAIN["Main Entry\n(cmd/)"]
|
||||
AGENT["Agent Loop\n(pkg/agent/)"]
|
||||
CONF["Config\n(pkg/config/)"]
|
||||
AUTH["Auth\n(pkg/auth/)"]
|
||||
PROV["Providers\n(pkg/providers/)"]
|
||||
TOOLS["Tools\n(pkg/tools/)"]
|
||||
end
|
||||
|
||||
subgraph ToolSet["🔧 Built-in Tools"]
|
||||
SHELL["Shell Exec"]
|
||||
FILE["File R/W"]
|
||||
WEB["Web Search\n(Brave / DuckDuckGo)"]
|
||||
CRON_T["Cron / Reminders"]
|
||||
SPAWN["Spawn Subagent"]
|
||||
MSG["Message Tool"]
|
||||
end
|
||||
|
||||
subgraph Workspace["💾 Workspace"]
|
||||
AGENTS_MD["AGENTS.md"]
|
||||
SOUL_MD["SOUL.md"]
|
||||
TOOLS_MD["TOOLS.md"]
|
||||
USER_MD["USER.md"]
|
||||
IDENTITY["IDENTITY.md"]
|
||||
HB["HEARTBEAT.md"]
|
||||
MEM["MEMORY.md"]
|
||||
SESSIONS["sessions/"]
|
||||
SKILLS["skills/"]
|
||||
end
|
||||
|
||||
subgraph Providers["☁️ LLM Providers"]
|
||||
GEMINI["Gemini"]
|
||||
ZHIPU["Zhipu"]
|
||||
OR["OpenRouter"]
|
||||
OA["OpenAI"]
|
||||
AN["Anthropic"]
|
||||
DS["DeepSeek"]
|
||||
GROQ["Groq\n(+ voice)"]
|
||||
end
|
||||
|
||||
Channels --> Core
|
||||
AGENT --> ToolSet
|
||||
AGENT --> Workspace
|
||||
AGENT --> Providers
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Message Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Channel as Chat Channel
|
||||
participant GW as Gateway
|
||||
participant Agent as Agent Loop
|
||||
participant LLM as LLM Provider
|
||||
participant Tools as Tools
|
||||
|
||||
User->>Channel: Send message
|
||||
Channel->>GW: Forward message
|
||||
GW->>Agent: Route to agent
|
||||
Agent->>Agent: Load context (AGENTS.md, SOUL.md, USER.md)
|
||||
Agent->>LLM: Send prompt + tool defs
|
||||
LLM-->>Agent: Response
|
||||
|
||||
alt Tool Call
|
||||
Agent->>Tools: Execute tool
|
||||
Tools-->>Agent: Result
|
||||
Agent->>LLM: Continue
|
||||
LLM-->>Agent: Next response
|
||||
end
|
||||
|
||||
Agent->>Agent: Update memory/session
|
||||
Agent-->>GW: Return response
|
||||
GW-->>Channel: Send reply
|
||||
Channel-->>User: Display
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Heartbeat System Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Timer as Heartbeat Timer
|
||||
participant Agent as Agent
|
||||
participant HB as HEARTBEAT.md
|
||||
participant Subagent as Spawn Subagent
|
||||
participant User
|
||||
|
||||
Timer->>Agent: Trigger (every 30 min)
|
||||
Agent->>HB: Read periodic tasks
|
||||
|
||||
alt Quick Task
|
||||
Agent->>Agent: Execute directly
|
||||
Agent-->>Timer: HEARTBEAT_OK
|
||||
end
|
||||
|
||||
alt Long Task
|
||||
Agent->>Subagent: Spawn async subagent
|
||||
Agent-->>Timer: Continue to next task
|
||||
Subagent->>Subagent: Work independently
|
||||
Subagent->>User: Send result via message tool
|
||||
end
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Key Components
|
||||
|
||||
### 1. Agent Loop (`pkg/agent/`)
|
||||
Go-native implementation of the LLM ↔ tool execution loop:
|
||||
- Builds context from workspace identity files
|
||||
- Sends to LLM provider with tool definitions
|
||||
- Iterates on tool calls up to `max_tool_iterations` (default: 20)
|
||||
- Session history managed in `workspace/sessions/`
|
||||
|
||||
### 2. Provider System (`pkg/providers/`)
|
||||
- Gemini and Zhipu are fully tested
|
||||
- OpenRouter, Anthropic, OpenAI, DeepSeek marked "to be tested"
|
||||
- Groq for voice transcription (Whisper)
|
||||
- Each provider implements a common interface
|
||||
|
||||
### 3. Tool System (`pkg/tools/`)
|
||||
Built-in tools:
|
||||
- **read_file** / **write_file** / **list_dir** / **edit_file** / **append_file** — File operations
|
||||
- **exec** — Shell command execution (with safety guards)
|
||||
- **web_search** — Brave Search or DuckDuckGo fallback
|
||||
- **cron** — Scheduled reminders and recurring tasks
|
||||
- **spawn** — Create async subagents
|
||||
- **message** — Subagent-to-user communication
|
||||
|
||||
### 4. Security Sandbox
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
RW["restrict_to_workspace = true"]
|
||||
|
||||
RW --> RF["read_file: workspace only"]
|
||||
RW --> WF["write_file: workspace only"]
|
||||
RW --> LD["list_dir: workspace only"]
|
||||
RW --> EF["edit_file: workspace only"]
|
||||
RW --> AF["append_file: workspace only"]
|
||||
RW --> EX["exec: workspace paths only"]
|
||||
|
||||
EX --> BL["ALWAYS Blocked:"]
|
||||
BL --> RM["rm -rf"]
|
||||
BL --> FMT["format, mkfs"]
|
||||
BL --> DD["dd if="]
|
||||
BL --> SHUT["shutdown, reboot"]
|
||||
BL --> FORK["fork bomb"]
|
||||
```
|
||||
|
||||
- Workspace sandbox enabled by default
|
||||
- All tools restricted to workspace directory
|
||||
- Dangerous commands always blocked (even with sandbox off)
|
||||
- Consistent across main agent, subagents, and heartbeat tasks
|
||||
|
||||
### 5. Heartbeat System
|
||||
- Reads `HEARTBEAT.md` every 30 minutes
|
||||
- Quick tasks executed directly
|
||||
- Long tasks spawned as async subagents
|
||||
- Subagents communicate independently via message tool
|
||||
|
||||
### 6. Channel System
|
||||
- **Telegram** — Easy setup (token only)
|
||||
- **Discord** — Bot token + intents
|
||||
- **QQ** — AppID + AppSecret
|
||||
- **DingTalk** — Client credentials
|
||||
- **LINE** — Credentials + webhook URL (HTTPS required)
|
||||
|
||||
### 7. Workspace Layout
|
||||
```
|
||||
~/.picoclaw/workspace/
|
||||
├── sessions/ # Conversation history
|
||||
├── memory/ # Long-term memory (MEMORY.md)
|
||||
├── state/ # Persistent state
|
||||
├── cron/ # Scheduled jobs database
|
||||
├── skills/ # Custom skills
|
||||
├── AGENTS.md # Agent behavior guide
|
||||
├── HEARTBEAT.md # Periodic task prompts
|
||||
├── IDENTITY.md # Agent identity
|
||||
├── SOUL.md # Agent soul
|
||||
├── TOOLS.md # Tool descriptions
|
||||
└── USER.md # User preferences
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Comparison Table (from README)
|
||||
|
||||
| | OpenClaw | NanoBot | **PicoClaw** |
|
||||
|---------------------|------------|-------------|-----------------------|
|
||||
| **Language** | TypeScript | Python | **Go** |
|
||||
| **RAM** | >1GB | >100MB | **<10MB** |
|
||||
| **Startup (0.8GHz)**| >500s | >30s | **<1s** |
|
||||
| **Cost** | Mac $599 | SBC ~$50 | **Any Linux, ~$10** |
|
||||
|
||||
---
|
||||
|
||||
## Deployment Targets
|
||||
|
||||
PicoClaw can run on almost any Linux device:
|
||||
- **$9.9** LicheeRV-Nano — Minimal home assistant
|
||||
- **$30-50** NanoKVM — Automated server maintenance
|
||||
- **$50-100** MaixCAM — Smart monitoring
|
||||
|
||||
---
|
||||
|
||||
## Key Design Decisions
|
||||
|
||||
1. **Go for minimal footprint** — Single binary, no runtime deps, tiny memory
|
||||
2. **AI-bootstrapped migration** — 95% of Go code generated by the AI agent itself
|
||||
3. **Web search with fallback** — Brave Search primary, DuckDuckGo fallback (free)
|
||||
4. **Heartbeat for proactive tasks** — Agent checks `HEARTBEAT.md` periodically
|
||||
5. **Subagent pattern** — Long tasks run async, don't block heartbeat
|
||||
6. **Default sandbox** — `restrict_to_workspace: true` by default
|
||||
7. **Cross-architecture** — Single binary compiles for x86, ARM64, RISC-V
|
||||
606
main.py
606
main.py
@@ -1,23 +1,17 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Aetheel Slack Service — Main Entry Point
|
||||
=========================================
|
||||
Starts the Slack adapter in Socket Mode, connected to the OpenCode AI runtime.
|
||||
Aetheel — Main Entry Point
|
||||
============================
|
||||
Starts the AI assistant with multi-channel adapters, memory, skills,
|
||||
scheduled tasks, and subagent support.
|
||||
|
||||
Usage:
|
||||
python main.py # Run with OpenCode AI handler
|
||||
python main.py --test # Run with echo handler for testing
|
||||
python main.py --cli # Force CLI mode (subprocess)
|
||||
python main.py --sdk # Force SDK mode (opencode serve)
|
||||
|
||||
Environment:
|
||||
SLACK_BOT_TOKEN — Slack bot token (xoxb-...)
|
||||
SLACK_APP_TOKEN — Slack app-level token (xapp-...)
|
||||
OPENCODE_MODE — "cli" or "sdk" (default: cli)
|
||||
OPENCODE_MODEL — Model to use (e.g., anthropic/claude-sonnet-4-20250514)
|
||||
OPENCODE_SERVER_URL — SDK server URL (default: http://localhost:4096)
|
||||
OPENCODE_TIMEOUT — CLI timeout in seconds (default: 120)
|
||||
LOG_LEVEL — Optional, default: INFO
|
||||
python main.py Start with Slack + AI handler
|
||||
python main.py --telegram Also enable Telegram adapter
|
||||
python main.py --claude Use Claude Code runtime
|
||||
python main.py --test Echo handler for testing
|
||||
python main.py --model anthropic/claude-sonnet-4-20250514
|
||||
python main.py --log DEBUG Debug logging
|
||||
"""
|
||||
|
||||
import argparse
|
||||
@@ -34,7 +28,8 @@ from dotenv import load_dotenv
|
||||
# Load .env file
|
||||
load_dotenv()
|
||||
|
||||
from adapters.slack_adapter import SlackAdapter, SlackMessage
|
||||
from adapters.base import BaseAdapter, IncomingMessage
|
||||
from adapters.slack_adapter import SlackAdapter
|
||||
from agent.claude_runtime import ClaudeCodeConfig, ClaudeCodeRuntime
|
||||
from agent.opencode_runtime import (
|
||||
AgentResponse,
|
||||
@@ -43,21 +38,34 @@ from agent.opencode_runtime import (
|
||||
RuntimeMode,
|
||||
build_aetheel_system_prompt,
|
||||
)
|
||||
from agent.subagent import SubagentManager
|
||||
from memory import MemoryManager
|
||||
from memory.types import MemoryConfig
|
||||
from scheduler import Scheduler
|
||||
from scheduler.store import ScheduledJob
|
||||
from skills import SkillsManager
|
||||
|
||||
logger = logging.getLogger("aetheel")
|
||||
|
||||
# Type alias for either runtime
|
||||
AnyRuntime = OpenCodeRuntime | ClaudeCodeRuntime
|
||||
|
||||
# Global runtime instance (initialized in main)
|
||||
# Global instances (initialized in main)
|
||||
_runtime: AnyRuntime | None = None
|
||||
_memory: MemoryManager | None = None
|
||||
_slack_adapter: SlackAdapter | None = None
|
||||
_skills: SkillsManager | None = None
|
||||
_scheduler: Scheduler | None = None
|
||||
_subagent_mgr: SubagentManager | None = None
|
||||
_adapters: dict[str, BaseAdapter] = {} # source_name -> adapter
|
||||
|
||||
# Runtime config (stored for subagent factory)
|
||||
_use_claude: bool = False
|
||||
_cli_args: argparse.Namespace | None = None
|
||||
|
||||
# Regex for parsing action tags from AI responses
|
||||
_ACTION_RE = re.compile(r"\[ACTION:remind\|(\d+)\|(.+?)\]", re.DOTALL)
|
||||
_CRON_RE = re.compile(r"\[ACTION:cron\|([\d\*/,\- ]+)\|(.+?)\]", re.DOTALL)
|
||||
_SPAWN_RE = re.compile(r"\[ACTION:spawn\|(.+?)\]", re.DOTALL)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -65,19 +73,16 @@ _ACTION_RE = re.compile(r"\[ACTION:remind\|(\d+)\|(.+?)\]", re.DOTALL)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def echo_handler(msg: SlackMessage) -> str:
|
||||
"""
|
||||
Simple echo handler for testing.
|
||||
Returns a formatted response with message details.
|
||||
"""
|
||||
def echo_handler(msg: IncomingMessage) -> str:
|
||||
"""Simple echo handler for testing."""
|
||||
response_lines = [
|
||||
f"👋 *Aetheel received your message!*",
|
||||
"",
|
||||
f"📝 *Text:* {msg.text}",
|
||||
f"👤 *From:* {msg.user_name} (`{msg.user_id}`)",
|
||||
f"📍 *Channel:* #{msg.channel_name} (`{msg.channel_id}`)",
|
||||
f"💬 *Type:* {'DM' if msg.is_dm else 'Mention' if msg.is_mention else 'Channel'}",
|
||||
f"🧵 *Thread:* `{msg.conversation_id[:15]}...`",
|
||||
f"📍 *Channel:* {msg.channel_name} (`{msg.channel_id}`)",
|
||||
f"💬 *Source:* {msg.source}",
|
||||
f"🧵 *ConvID:* `{msg.conversation_id[:15]}...`",
|
||||
f"🕐 *Time:* {msg.timestamp.strftime('%Y-%m-%d %H:%M:%S UTC')}",
|
||||
"",
|
||||
f"_This is an echo response from the Aetheel test handler._",
|
||||
@@ -85,20 +90,22 @@ def echo_handler(msg: SlackMessage) -> str:
|
||||
return "\n".join(response_lines)
|
||||
|
||||
|
||||
def _build_memory_context(msg: SlackMessage) -> str:
|
||||
def _build_context(msg: IncomingMessage) -> str:
|
||||
"""
|
||||
Build memory context to inject into the system prompt.
|
||||
Build full context to inject into the system prompt.
|
||||
|
||||
Reads identity files (SOUL.md, USER.md) and searches long-term
|
||||
memory for relevant context based on the user's message.
|
||||
Combines:
|
||||
- Identity files (SOUL.md, USER.md, MEMORY.md)
|
||||
- Relevant memory search results
|
||||
- Relevant skills for this message
|
||||
- Available skills summary
|
||||
"""
|
||||
global _memory
|
||||
if _memory is None:
|
||||
return ""
|
||||
global _memory, _skills
|
||||
|
||||
sections: list[str] = []
|
||||
|
||||
# ── Identity: SOUL.md ──
|
||||
if _memory:
|
||||
soul = _memory.read_soul()
|
||||
if soul:
|
||||
sections.append(f"# Your Identity (SOUL.md)\n\n{soul}")
|
||||
@@ -119,7 +126,6 @@ def _build_memory_context(msg: SlackMessage) -> str:
|
||||
if results:
|
||||
snippets = []
|
||||
for r in results:
|
||||
# Skip if it's just the identity files themselves (already included)
|
||||
if r.path in ("SOUL.md", "USER.md", "MEMORY.md"):
|
||||
continue
|
||||
snippets.append(
|
||||
@@ -134,26 +140,30 @@ def _build_memory_context(msg: SlackMessage) -> str:
|
||||
except Exception as e:
|
||||
logger.debug(f"Memory search failed: {e}")
|
||||
|
||||
# ── Skills context ──
|
||||
if _skills:
|
||||
# Inject matching skill instructions
|
||||
skill_context = _skills.get_context(msg.text)
|
||||
if skill_context:
|
||||
sections.append(skill_context)
|
||||
|
||||
# Always show available skills summary
|
||||
skills_summary = _skills.get_all_context()
|
||||
if skills_summary:
|
||||
sections.append(skills_summary)
|
||||
|
||||
return "\n\n---\n\n".join(sections)
|
||||
|
||||
|
||||
def ai_handler(msg: SlackMessage) -> str:
|
||||
def ai_handler(msg: IncomingMessage) -> str:
|
||||
"""
|
||||
AI-powered handler using OpenCode runtime.
|
||||
|
||||
This is the heart of Aetheel — it routes incoming Slack messages
|
||||
through the OpenCode agent runtime, which handles:
|
||||
- Memory context injection (SOUL.md, USER.md, MEMORY.md)
|
||||
- Session management (per-thread)
|
||||
- Model selection
|
||||
- System prompt injection
|
||||
- Response generation
|
||||
- Conversation logging
|
||||
AI-powered handler — the heart of Aetheel.
|
||||
|
||||
Flow:
|
||||
Slack message → memory context → ai_handler → OpenCodeRuntime.chat() → AI response → session log
|
||||
Message → context (memory + skills) → system prompt → runtime.chat()
|
||||
→ action tags → session log → response
|
||||
"""
|
||||
global _runtime, _memory
|
||||
global _runtime, _memory, _scheduler
|
||||
|
||||
if _runtime is None:
|
||||
return "⚠️ AI runtime not initialized. Please restart the service."
|
||||
@@ -173,15 +183,19 @@ def ai_handler(msg: SlackMessage) -> str:
|
||||
if text_lower in ("sessions", "/sessions"):
|
||||
return _format_sessions()
|
||||
|
||||
# Build memory context from identity files + search
|
||||
memory_context = _build_memory_context(msg)
|
||||
# Cron management commands
|
||||
if text_lower.startswith("/cron"):
|
||||
return _handle_cron_command(text_lower)
|
||||
|
||||
# Route to AI via OpenCode
|
||||
# Build context from memory + skills
|
||||
context = _build_context(msg)
|
||||
|
||||
# Route to AI via runtime
|
||||
system_prompt = build_aetheel_system_prompt(
|
||||
user_name=msg.user_name,
|
||||
channel_name=msg.channel_name,
|
||||
is_dm=msg.is_dm,
|
||||
extra_context=memory_context,
|
||||
extra_context=context,
|
||||
)
|
||||
|
||||
response = _runtime.chat(
|
||||
@@ -194,12 +208,10 @@ def ai_handler(msg: SlackMessage) -> str:
|
||||
error_msg = response.error or "Unknown error"
|
||||
logger.error(f"AI error: {error_msg}")
|
||||
|
||||
# Provide a helpful error message
|
||||
if "not found" in error_msg.lower() or "not installed" in error_msg.lower():
|
||||
return (
|
||||
"⚠️ OpenCode CLI is not available.\n"
|
||||
"Install it with: `curl -fsSL https://opencode.ai/install | bash`\n"
|
||||
"See `docs/opencode-setup.md` for details."
|
||||
"⚠️ AI CLI is not available.\n"
|
||||
"Check the runtime installation docs."
|
||||
)
|
||||
if "timeout" in error_msg.lower():
|
||||
return (
|
||||
@@ -208,19 +220,18 @@ def ai_handler(msg: SlackMessage) -> str:
|
||||
)
|
||||
return f"⚠️ AI error: {error_msg[:200]}"
|
||||
|
||||
# Log response stats
|
||||
logger.info(
|
||||
f"🤖 AI response: {len(response.text)} chars, "
|
||||
f"{response.duration_ms}ms"
|
||||
)
|
||||
|
||||
# Parse and execute action tags (e.g., reminders)
|
||||
# Parse and execute action tags (reminders, cron, spawn)
|
||||
reply_text = _process_action_tags(response.text, msg)
|
||||
|
||||
# Log conversation to memory session log
|
||||
if _memory:
|
||||
try:
|
||||
channel = "dm" if msg.is_dm else msg.channel_name or "slack"
|
||||
channel = "dm" if msg.is_dm else msg.channel_name or msg.source
|
||||
_memory.log_session(
|
||||
f"**User ({msg.user_name}):** {msg.text}\n\n"
|
||||
f"**Aetheel:** {reply_text}",
|
||||
@@ -237,81 +248,216 @@ def ai_handler(msg: SlackMessage) -> str:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _process_action_tags(text: str, msg: SlackMessage) -> str:
|
||||
def _process_action_tags(text: str, msg: IncomingMessage) -> str:
|
||||
"""
|
||||
Parse and execute action tags from the AI response.
|
||||
|
||||
Currently supports:
|
||||
[ACTION:remind|<minutes>|<message>]
|
||||
|
||||
Returns the response text with action tags stripped out.
|
||||
Supports:
|
||||
[ACTION:remind|<minutes>|<message>] → one-shot reminder
|
||||
[ACTION:cron|<cron_expr>|<prompt>] → recurring cron job
|
||||
[ACTION:spawn|<task description>] → background subagent
|
||||
"""
|
||||
cleaned = text
|
||||
|
||||
# Find all reminder action tags
|
||||
# ── Remind tags (one-shot) ──
|
||||
for match in _ACTION_RE.finditer(text):
|
||||
minutes_str, reminder_msg = match.group(1), match.group(2)
|
||||
try:
|
||||
minutes = int(minutes_str)
|
||||
_schedule_reminder(
|
||||
if _scheduler:
|
||||
_scheduler.add_once(
|
||||
delay_minutes=minutes,
|
||||
message=reminder_msg.strip(),
|
||||
prompt=reminder_msg.strip(),
|
||||
channel_id=msg.channel_id,
|
||||
thread_ts=msg.thread_ts if hasattr(msg, "thread_ts") else None,
|
||||
channel_type=msg.source,
|
||||
thread_id=msg.raw_event.get("thread_id"),
|
||||
user_name=msg.user_name,
|
||||
)
|
||||
logger.info(
|
||||
f"⏰ Reminder scheduled: '{reminder_msg.strip()[:50]}' "
|
||||
f"in {minutes} min for #{msg.channel_name}"
|
||||
f"in {minutes} min for {msg.source}/{msg.channel_name}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to schedule reminder: {e}")
|
||||
cleaned = cleaned.replace(match.group(0), "").strip()
|
||||
|
||||
# Strip the action tag from the visible response
|
||||
# ── Cron tags (recurring) ──
|
||||
for match in _CRON_RE.finditer(text):
|
||||
cron_expr, cron_prompt = match.group(1).strip(), match.group(2).strip()
|
||||
try:
|
||||
if _scheduler:
|
||||
job_id = _scheduler.add_cron(
|
||||
cron_expr=cron_expr,
|
||||
prompt=cron_prompt,
|
||||
channel_id=msg.channel_id,
|
||||
channel_type=msg.source,
|
||||
thread_id=msg.raw_event.get("thread_id"),
|
||||
user_name=msg.user_name,
|
||||
)
|
||||
logger.info(
|
||||
f"🔄 Cron scheduled: '{cron_prompt[:50]}' ({cron_expr}) "
|
||||
f"job_id={job_id}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to schedule cron job: {e}")
|
||||
cleaned = cleaned.replace(match.group(0), "").strip()
|
||||
|
||||
# ── Spawn tags (subagent) ──
|
||||
for match in _SPAWN_RE.finditer(text):
|
||||
spawn_task = match.group(1).strip()
|
||||
try:
|
||||
if _subagent_mgr:
|
||||
task_id = _subagent_mgr.spawn(
|
||||
task=spawn_task,
|
||||
channel_id=msg.channel_id,
|
||||
channel_type=msg.source,
|
||||
thread_id=msg.raw_event.get("thread_id"),
|
||||
user_name=msg.user_name,
|
||||
)
|
||||
logger.info(
|
||||
f"🚀 Subagent spawned: '{spawn_task[:50]}' "
|
||||
f"task_id={task_id}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to spawn subagent: {e}")
|
||||
cleaned = cleaned.replace(match.group(0), "").strip()
|
||||
|
||||
return cleaned
|
||||
|
||||
|
||||
def _schedule_reminder(
|
||||
*,
|
||||
delay_minutes: int,
|
||||
message: str,
|
||||
# ---------------------------------------------------------------------------
|
||||
# Cron Management Commands
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _handle_cron_command(text: str) -> str:
|
||||
"""Handle /cron subcommands."""
|
||||
global _scheduler
|
||||
|
||||
if not _scheduler:
|
||||
return "⚠️ Scheduler not initialized."
|
||||
|
||||
parts = text.strip().split(maxsplit=2)
|
||||
|
||||
if len(parts) < 2 or parts[1] == "list":
|
||||
jobs = _scheduler.list_jobs()
|
||||
if not jobs:
|
||||
return "📋 No scheduled jobs."
|
||||
lines = ["📋 *Scheduled Jobs:*\n"]
|
||||
for job in jobs:
|
||||
kind = f"🔄 `{job.cron_expr}`" if job.is_recurring else "⏰ one-shot"
|
||||
lines.append(
|
||||
f"• `{job.id}` — {kind} — {job.prompt[:60]}"
|
||||
)
|
||||
return "\n".join(lines)
|
||||
|
||||
if parts[1] == "remove" and len(parts) >= 3:
|
||||
job_id = parts[2].strip()
|
||||
if _scheduler.remove(job_id):
|
||||
return f"✅ Job `{job_id}` removed."
|
||||
return f"⚠️ Job `{job_id}` not found."
|
||||
|
||||
return (
|
||||
"Usage: `/cron list` or `/cron remove <id>`"
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Scheduler Callback
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _on_scheduled_job(job: ScheduledJob) -> None:
|
||||
"""
|
||||
Called by the scheduler when a job fires.
|
||||
|
||||
Creates a synthetic IncomingMessage and routes it through ai_handler,
|
||||
then sends the response to the right channel.
|
||||
"""
|
||||
logger.info(f"🔔 Scheduled job firing: {job.id} — '{job.prompt[:50]}'")
|
||||
|
||||
# Build a synthetic message
|
||||
msg = IncomingMessage(
|
||||
text=job.prompt,
|
||||
user_id="system",
|
||||
user_name=job.user_name or "Scheduler",
|
||||
channel_id=job.channel_id,
|
||||
channel_name=f"scheduled-{job.id}",
|
||||
conversation_id=f"cron-{job.id}",
|
||||
source=job.channel_type,
|
||||
is_dm=True,
|
||||
raw_event={"thread_id": job.thread_id},
|
||||
)
|
||||
|
||||
# Route through the AI handler
|
||||
try:
|
||||
response = ai_handler(msg)
|
||||
if response:
|
||||
_send_to_channel(
|
||||
channel_id=job.channel_id,
|
||||
text=response,
|
||||
thread_id=job.thread_id,
|
||||
channel_type=job.channel_type,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Scheduled job {job.id} handler failed: {e}", exc_info=True)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Multi-Channel Send
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _send_to_channel(
|
||||
channel_id: str,
|
||||
thread_ts: str | None = None,
|
||||
user_name: str | None = None,
|
||||
text: str,
|
||||
thread_id: str | None,
|
||||
channel_type: str,
|
||||
) -> None:
|
||||
"""
|
||||
Schedule a Slack message to be sent after a delay.
|
||||
Uses a background thread with a timer.
|
||||
Send a message to a specific channel via the right adapter.
|
||||
|
||||
Used by the scheduler and subagent manager to route responses
|
||||
back to the correct platform.
|
||||
"""
|
||||
global _slack_adapter
|
||||
|
||||
delay_seconds = delay_minutes * 60
|
||||
|
||||
def _send_reminder():
|
||||
try:
|
||||
if _slack_adapter and _slack_adapter._app:
|
||||
mention = f"@{user_name}" if user_name else ""
|
||||
reminder_text = f"⏰ *Reminder* {mention}: {message}"
|
||||
|
||||
kwargs = {
|
||||
"channel": channel_id,
|
||||
"text": reminder_text,
|
||||
}
|
||||
if thread_ts:
|
||||
kwargs["thread_ts"] = thread_ts
|
||||
|
||||
_slack_adapter._app.client.chat_postMessage(**kwargs)
|
||||
logger.info(f"⏰ Reminder sent: '{message[:50]}'")
|
||||
adapter = _adapters.get(channel_type)
|
||||
if adapter:
|
||||
adapter.send_message(
|
||||
channel_id=channel_id,
|
||||
text=text,
|
||||
thread_id=thread_id,
|
||||
)
|
||||
else:
|
||||
logger.warning("Cannot send reminder: Slack adapter not available")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to send reminder: {e}")
|
||||
# Fallback: try the first available adapter
|
||||
for a in _adapters.values():
|
||||
a.send_message(channel_id=channel_id, text=text, thread_id=thread_id)
|
||||
break
|
||||
else:
|
||||
logger.warning(
|
||||
f"No adapter for '{channel_type}' — cannot send message"
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Runtime Factory (for subagents)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _make_runtime() -> AnyRuntime:
|
||||
"""Create a fresh runtime instance (used by subagent manager)."""
|
||||
global _use_claude, _cli_args
|
||||
|
||||
if _use_claude:
|
||||
config = ClaudeCodeConfig.from_env()
|
||||
if _cli_args and _cli_args.model:
|
||||
config.model = _cli_args.model
|
||||
return ClaudeCodeRuntime(config)
|
||||
else:
|
||||
config = OpenCodeConfig.from_env()
|
||||
if _cli_args and _cli_args.model:
|
||||
config.model = _cli_args.model
|
||||
return OpenCodeRuntime(config)
|
||||
|
||||
timer = threading.Timer(delay_seconds, _send_reminder)
|
||||
timer.daemon = True
|
||||
timer.start()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Formatting Helpers
|
||||
@@ -320,7 +466,7 @@ def _schedule_reminder(
|
||||
|
||||
def _format_status() -> str:
|
||||
"""Format the /status response with runtime info."""
|
||||
global _runtime
|
||||
global _runtime, _scheduler, _skills, _subagent_mgr
|
||||
|
||||
lines = [
|
||||
"🟢 *Aetheel is online*",
|
||||
@@ -334,14 +480,27 @@ def _format_status() -> str:
|
||||
f"• *Model:* {status['model']}",
|
||||
f"• *Provider:* {status['provider']}",
|
||||
f"• *Active Sessions:* {status['active_sessions']}",
|
||||
f"• *OpenCode Available:* {'✅' if status['opencode_available'] else '❌'}",
|
||||
])
|
||||
if "sdk_connected" in status:
|
||||
lines.append(
|
||||
f"• *SDK Connected:* {'✅' if status['sdk_connected'] else '❌'}"
|
||||
)
|
||||
else:
|
||||
lines.append("• Runtime: not initialized")
|
||||
|
||||
# Adapter status
|
||||
if _adapters:
|
||||
adapter_names = ", ".join(_adapters.keys())
|
||||
lines.append(f"• *Channels:* {adapter_names}")
|
||||
|
||||
# Skills status
|
||||
if _skills:
|
||||
skill_count = len(_skills.skills)
|
||||
lines.append(f"• *Skills Loaded:* {skill_count}")
|
||||
|
||||
# Scheduler status
|
||||
if _scheduler:
|
||||
jobs = _scheduler.list_jobs()
|
||||
lines.append(f"• *Scheduled Jobs:* {len(jobs)}")
|
||||
|
||||
# Subagents status
|
||||
if _subagent_mgr:
|
||||
active = _subagent_mgr.list_active()
|
||||
lines.append(f"• *Active Subagents:* {len(active)}")
|
||||
|
||||
lines.extend([
|
||||
"",
|
||||
@@ -361,13 +520,18 @@ def _format_help() -> str:
|
||||
"• `help` — Show this help message\n"
|
||||
"• `time` — Current server time\n"
|
||||
"• `sessions` — Active session count\n"
|
||||
"• `/cron list` — List scheduled jobs\n"
|
||||
"• `/cron remove <id>` — Remove a scheduled job\n"
|
||||
"\n"
|
||||
"*AI Chat:*\n"
|
||||
"• Send any message and the AI will respond\n"
|
||||
"• Each thread maintains its own conversation\n"
|
||||
"• DMs work too — just message me directly\n"
|
||||
"\n"
|
||||
"_Powered by OpenCode — https://opencode.ai_"
|
||||
"*AI Actions:*\n"
|
||||
"• The AI can schedule reminders\n"
|
||||
"• The AI can set up recurring cron jobs\n"
|
||||
"• The AI can spawn background subagents for long tasks\n"
|
||||
)
|
||||
|
||||
|
||||
@@ -390,51 +554,36 @@ def _format_sessions() -> str:
|
||||
|
||||
|
||||
def main():
|
||||
global _runtime, _memory, _skills, _scheduler, _subagent_mgr
|
||||
global _adapters, _use_claude, _cli_args
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Aetheel Slack Service — AI-Powered via OpenCode or Claude Code",
|
||||
description="Aetheel — AI-Powered Personal Assistant",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
epilog="""\
|
||||
Examples:
|
||||
python main.py Start with AI handler (OpenCode)
|
||||
python main.py --claude Start with Claude Code runtime
|
||||
python main.py --test Start with echo-only handler
|
||||
python main.py --cli Force CLI mode (subprocess, OpenCode)
|
||||
python main.py --sdk Force SDK mode (opencode serve)
|
||||
python main.py Start with Slack + AI handler
|
||||
python main.py --telegram Also enable Telegram adapter
|
||||
python main.py --claude Use Claude Code runtime
|
||||
python main.py --test Echo-only handler
|
||||
python main.py --model anthropic/claude-sonnet-4-20250514
|
||||
python main.py --log DEBUG Start with debug logging
|
||||
python main.py --log DEBUG Debug logging
|
||||
""",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--test",
|
||||
action="store_true",
|
||||
help="Use simple echo handler for testing",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--claude",
|
||||
action="store_true",
|
||||
help="Use Claude Code runtime instead of OpenCode",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--cli",
|
||||
action="store_true",
|
||||
help="Force CLI mode (opencode run subprocess)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sdk",
|
||||
action="store_true",
|
||||
help="Force SDK mode (opencode serve API)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--model",
|
||||
default=None,
|
||||
help="Model to use (e.g., anthropic/claude-sonnet-4-20250514)",
|
||||
)
|
||||
parser.add_argument("--test", action="store_true", help="Use echo handler for testing")
|
||||
parser.add_argument("--claude", action="store_true", help="Use Claude Code runtime")
|
||||
parser.add_argument("--cli", action="store_true", help="Force CLI mode (OpenCode)")
|
||||
parser.add_argument("--sdk", action="store_true", help="Force SDK mode (OpenCode)")
|
||||
parser.add_argument("--telegram", action="store_true", help="Enable Telegram adapter")
|
||||
parser.add_argument("--model", default=None, help="Model to use")
|
||||
parser.add_argument(
|
||||
"--log",
|
||||
default=os.environ.get("LOG_LEVEL", "INFO"),
|
||||
help="Log level (DEBUG, INFO, WARNING, ERROR)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
_cli_args = args
|
||||
_use_claude = args.claude
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
@@ -443,21 +592,9 @@ Examples:
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
)
|
||||
|
||||
# Validate Slack tokens are present
|
||||
if not os.environ.get("SLACK_BOT_TOKEN"):
|
||||
print("❌ SLACK_BOT_TOKEN is not set!")
|
||||
print(" Copy .env.example to .env and add your tokens.")
|
||||
print(" See docs/slack-setup.md for instructions.")
|
||||
sys.exit(1)
|
||||
|
||||
if not os.environ.get("SLACK_APP_TOKEN"):
|
||||
print("❌ SLACK_APP_TOKEN is not set!")
|
||||
print(" Copy .env.example to .env and add your tokens.")
|
||||
print(" See docs/slack-setup.md for instructions.")
|
||||
sys.exit(1)
|
||||
|
||||
# Initialize memory system
|
||||
global _runtime, _memory
|
||||
# -------------------------------------------------------------------
|
||||
# 1. Initialize Memory System
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
workspace_dir = os.environ.get(
|
||||
"AETHEEL_WORKSPACE", os.path.expanduser("~/.aetheel/workspace")
|
||||
@@ -472,11 +609,8 @@ Examples:
|
||||
db_path=db_path,
|
||||
)
|
||||
_memory = MemoryManager(mem_config)
|
||||
logger.info(
|
||||
f"Memory system initialized: workspace={workspace_dir}"
|
||||
)
|
||||
logger.info(f"Memory system initialized: workspace={workspace_dir}")
|
||||
|
||||
# Initial sync (indexes identity files on first run)
|
||||
stats = asyncio.run(_memory.sync())
|
||||
logger.info(
|
||||
f"Memory sync: {stats.get('files_indexed', 0)} files indexed, "
|
||||
@@ -486,63 +620,161 @@ Examples:
|
||||
logger.warning(f"Memory system init failed (continuing without): {e}")
|
||||
_memory = None
|
||||
|
||||
# Initialize AI runtime (unless in test mode)
|
||||
# -------------------------------------------------------------------
|
||||
# 2. Initialize Skills System
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
try:
|
||||
_skills = SkillsManager(workspace_dir)
|
||||
loaded = _skills.load_all()
|
||||
logger.info(f"Skills system initialized: {len(loaded)} skill(s)")
|
||||
except Exception as e:
|
||||
logger.warning(f"Skills system init failed (continuing without): {e}")
|
||||
_skills = None
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# 3. Initialize AI Runtime
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
runtime_label = "echo (test mode)"
|
||||
if not args.test:
|
||||
if args.claude:
|
||||
# Claude Code runtime
|
||||
claude_config = ClaudeCodeConfig.from_env()
|
||||
if args.model:
|
||||
claude_config.model = args.model
|
||||
_runtime = ClaudeCodeRuntime(claude_config)
|
||||
runtime_label = f"claude-code, model={claude_config.model or 'default'}"
|
||||
else:
|
||||
# OpenCode runtime (default)
|
||||
config = OpenCodeConfig.from_env()
|
||||
|
||||
# CLI flag overrides
|
||||
if args.cli:
|
||||
config.mode = RuntimeMode.CLI
|
||||
elif args.sdk:
|
||||
config.mode = RuntimeMode.SDK
|
||||
|
||||
if args.model:
|
||||
config.model = args.model
|
||||
|
||||
_runtime = OpenCodeRuntime(config)
|
||||
runtime_label = (
|
||||
f"opencode/{config.mode.value}, "
|
||||
f"model={config.model or 'default'}"
|
||||
)
|
||||
|
||||
# Create Slack adapter
|
||||
global _slack_adapter
|
||||
adapter = SlackAdapter(log_level=args.log)
|
||||
_slack_adapter = adapter
|
||||
# -------------------------------------------------------------------
|
||||
# 4. Initialize Scheduler
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
# Register handler
|
||||
if args.test:
|
||||
adapter.on_message(echo_handler)
|
||||
logger.info("Using echo handler (test mode)")
|
||||
try:
|
||||
_scheduler = Scheduler(callback=_on_scheduled_job)
|
||||
_scheduler.start()
|
||||
logger.info("Scheduler initialized")
|
||||
except Exception as e:
|
||||
logger.warning(f"Scheduler init failed (continuing without): {e}")
|
||||
_scheduler = None
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# 5. Initialize Subagent Manager
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
if _runtime:
|
||||
try:
|
||||
_subagent_mgr = SubagentManager(
|
||||
runtime_factory=_make_runtime,
|
||||
send_fn=_send_to_channel,
|
||||
)
|
||||
logger.info("Subagent manager initialized")
|
||||
except Exception as e:
|
||||
logger.warning(f"Subagent manager init failed: {e}")
|
||||
_subagent_mgr = None
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# 6. Initialize Channel Adapters
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
# Choose the message handler
|
||||
handler = echo_handler if args.test else ai_handler
|
||||
|
||||
# Slack adapter (always enabled if tokens are present)
|
||||
slack_token = os.environ.get("SLACK_BOT_TOKEN")
|
||||
slack_app_token = os.environ.get("SLACK_APP_TOKEN")
|
||||
|
||||
if slack_token and slack_app_token:
|
||||
try:
|
||||
slack = SlackAdapter(log_level=args.log)
|
||||
slack.on_message(handler)
|
||||
_adapters["slack"] = slack
|
||||
logger.info("Slack adapter registered")
|
||||
except Exception as e:
|
||||
logger.error(f"Slack adapter failed to initialize: {e}")
|
||||
else:
|
||||
adapter.on_message(ai_handler)
|
||||
logger.info(f"Using AI handler ({runtime_label})")
|
||||
logger.warning("Slack tokens not set — Slack adapter disabled")
|
||||
|
||||
# Telegram adapter (enabled with --telegram flag)
|
||||
if args.telegram:
|
||||
telegram_token = os.environ.get("TELEGRAM_BOT_TOKEN")
|
||||
if telegram_token:
|
||||
try:
|
||||
from adapters.telegram_adapter import TelegramAdapter
|
||||
|
||||
telegram = TelegramAdapter()
|
||||
telegram.on_message(handler)
|
||||
_adapters["telegram"] = telegram
|
||||
logger.info("Telegram adapter registered")
|
||||
except Exception as e:
|
||||
logger.error(f"Telegram adapter failed to initialize: {e}")
|
||||
else:
|
||||
logger.error(
|
||||
"TELEGRAM_BOT_TOKEN not set — cannot enable Telegram. "
|
||||
"Get a token from @BotFather on Telegram."
|
||||
)
|
||||
|
||||
if not _adapters:
|
||||
print("❌ No channel adapters initialized!")
|
||||
print(" Set SLACK_BOT_TOKEN + SLACK_APP_TOKEN or use --telegram")
|
||||
sys.exit(1)
|
||||
|
||||
# Start file watching for automatic memory re-indexing
|
||||
if _memory:
|
||||
_memory.start_watching()
|
||||
|
||||
# Start (blocking)
|
||||
# -------------------------------------------------------------------
|
||||
# 7. Start Adapters
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
logger.info("=" * 60)
|
||||
logger.info(" Aetheel Starting")
|
||||
logger.info("=" * 60)
|
||||
logger.info(f" Runtime: {runtime_label}")
|
||||
logger.info(f" Channels: {', '.join(_adapters.keys())}")
|
||||
logger.info(f" Skills: {len(_skills.skills) if _skills else 0}")
|
||||
logger.info(f" Scheduler: {'✅' if _scheduler else '❌'}")
|
||||
logger.info(f" Subagents: {'✅' if _subagent_mgr else '❌'}")
|
||||
logger.info("=" * 60)
|
||||
|
||||
try:
|
||||
if len(_adapters) == 1:
|
||||
# Single adapter — start it blocking
|
||||
adapter = next(iter(_adapters.values()))
|
||||
adapter.start()
|
||||
else:
|
||||
# Multiple adapters — start all but last async, last blocking
|
||||
adapter_list = list(_adapters.values())
|
||||
for adapter in adapter_list[:-1]:
|
||||
adapter.start_async()
|
||||
adapter_list[-1].start() # blocking
|
||||
|
||||
except KeyboardInterrupt:
|
||||
if _memory:
|
||||
_memory.close()
|
||||
logger.info("Shutting down...")
|
||||
finally:
|
||||
# Cleanup
|
||||
for adapter in _adapters.values():
|
||||
try:
|
||||
adapter.stop()
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
pass
|
||||
if _scheduler:
|
||||
_scheduler.stop()
|
||||
if _memory:
|
||||
_memory.close()
|
||||
logger.error(f"Fatal error: {e}", exc_info=True)
|
||||
sys.exit(1)
|
||||
logger.info("Aetheel stopped. Goodbye! 👋")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -5,9 +5,21 @@ description = "A personal AI assistant that lives in Slack — with persistent m
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.14"
|
||||
dependencies = [
|
||||
"apscheduler>=3.10.0,<4.0.0",
|
||||
"fastembed>=0.7.4",
|
||||
"python-dotenv>=1.2.1,<2.0.0",
|
||||
"python-telegram-bot>=21.0",
|
||||
"slack-bolt>=1.27.0,<2.0.0",
|
||||
"slack-sdk>=3.40.0,<4.0.0",
|
||||
"watchdog>=6.0.0",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
test = [
|
||||
"pytest>=8.0",
|
||||
"pytest-asyncio>=0.24",
|
||||
]
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
include = ["agent*", "adapters*", "memory*", "skills*", "scheduler*"]
|
||||
exclude = ["tests*", "archive*", "inspiration*"]
|
||||
|
||||
6
scheduler/__init__.py
Normal file
6
scheduler/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
# Aetheel Scheduler
|
||||
# Persistent cron-based task scheduling.
|
||||
|
||||
from scheduler.scheduler import Scheduler
|
||||
|
||||
__all__ = ["Scheduler"]
|
||||
275
scheduler/scheduler.py
Normal file
275
scheduler/scheduler.py
Normal file
@@ -0,0 +1,275 @@
|
||||
"""
|
||||
Aetheel Scheduler
|
||||
=================
|
||||
APScheduler-based task scheduler with SQLite persistence.
|
||||
|
||||
Supports:
|
||||
- One-shot delayed jobs (replaces threading.Timer reminders)
|
||||
- Recurring cron jobs (cron expressions)
|
||||
- Persistent storage (jobs survive restarts)
|
||||
- Callback-based execution (fires handlers with job context)
|
||||
|
||||
Usage:
|
||||
from scheduler import Scheduler
|
||||
|
||||
scheduler = Scheduler(callback=my_handler)
|
||||
scheduler.start()
|
||||
|
||||
# One-shot reminder
|
||||
scheduler.add_once(
|
||||
delay_minutes=5,
|
||||
prompt="Time to stretch!",
|
||||
channel_id="C123",
|
||||
channel_type="slack",
|
||||
)
|
||||
|
||||
# Recurring cron job
|
||||
scheduler.add_cron(
|
||||
cron_expr="0 9 * * *",
|
||||
prompt="Good morning! Here's your daily summary.",
|
||||
channel_id="C123",
|
||||
channel_type="slack",
|
||||
)
|
||||
"""
|
||||
|
||||
import logging
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Callable
|
||||
|
||||
from apscheduler.schedulers.background import BackgroundScheduler
|
||||
from apscheduler.triggers.cron import CronTrigger
|
||||
from apscheduler.triggers.date import DateTrigger
|
||||
|
||||
from scheduler.store import JobStore, ScheduledJob
|
||||
|
||||
logger = logging.getLogger("aetheel.scheduler")
|
||||
|
||||
# Callback type: receives the ScheduledJob when it fires
|
||||
JobCallback = Callable[[ScheduledJob], None]
|
||||
|
||||
|
||||
class Scheduler:
|
||||
"""
|
||||
APScheduler-based task scheduler.
|
||||
|
||||
Wraps BackgroundScheduler with SQLite persistence. When a job fires,
|
||||
it calls the registered callback with the ScheduledJob details. The
|
||||
callback is responsible for routing the job's prompt to the AI handler
|
||||
and sending the response to the right channel.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
callback: JobCallback,
|
||||
db_path: str | None = None,
|
||||
):
|
||||
self._callback = callback
|
||||
self._store = JobStore(db_path=db_path)
|
||||
self._scheduler = BackgroundScheduler(
|
||||
daemon=True,
|
||||
job_defaults={"misfire_grace_time": 60},
|
||||
)
|
||||
self._running = False
|
||||
|
||||
@property
|
||||
def running(self) -> bool:
|
||||
return self._running
|
||||
|
||||
def start(self) -> None:
|
||||
"""Start the scheduler and restore persisted jobs."""
|
||||
if self._running:
|
||||
return
|
||||
|
||||
self._scheduler.start()
|
||||
self._running = True
|
||||
|
||||
# Restore recurring jobs from the database
|
||||
restored = 0
|
||||
for job in self._store.list_recurring():
|
||||
try:
|
||||
self._register_cron_job(job)
|
||||
restored += 1
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to restore job {job.id}: {e}")
|
||||
|
||||
logger.info(f"Scheduler started (restored {restored} recurring jobs)")
|
||||
|
||||
def stop(self) -> None:
|
||||
"""Shut down the scheduler."""
|
||||
if self._running:
|
||||
self._scheduler.shutdown(wait=False)
|
||||
self._running = False
|
||||
logger.info("Scheduler stopped")
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Public API: Add jobs
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
def add_once(
|
||||
self,
|
||||
*,
|
||||
delay_minutes: int,
|
||||
prompt: str,
|
||||
channel_id: str,
|
||||
channel_type: str = "slack",
|
||||
thread_id: str | None = None,
|
||||
user_name: str | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Schedule a one-shot job to fire after a delay.
|
||||
|
||||
Replaces the old threading.Timer-based _schedule_reminder().
|
||||
Returns the job ID.
|
||||
"""
|
||||
job_id = JobStore.new_id()
|
||||
run_at = datetime.now(timezone.utc) + timedelta(minutes=delay_minutes)
|
||||
|
||||
job = ScheduledJob(
|
||||
id=job_id,
|
||||
cron_expr=None,
|
||||
prompt=prompt,
|
||||
channel_id=channel_id,
|
||||
channel_type=channel_type,
|
||||
thread_id=thread_id,
|
||||
user_name=user_name,
|
||||
created_at=datetime.now(timezone.utc).isoformat(),
|
||||
next_run=run_at.isoformat(),
|
||||
)
|
||||
|
||||
# Persist
|
||||
self._store.add(job)
|
||||
|
||||
# Schedule
|
||||
self._scheduler.add_job(
|
||||
self._fire_job,
|
||||
trigger=DateTrigger(run_date=run_at),
|
||||
args=[job_id],
|
||||
id=f"once-{job_id}",
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"⏰ One-shot scheduled: '{prompt[:50]}' in {delay_minutes} min "
|
||||
f"(id={job_id}, channel={channel_type}/{channel_id})"
|
||||
)
|
||||
return job_id
|
||||
|
||||
def add_cron(
|
||||
self,
|
||||
*,
|
||||
cron_expr: str,
|
||||
prompt: str,
|
||||
channel_id: str,
|
||||
channel_type: str = "slack",
|
||||
thread_id: str | None = None,
|
||||
user_name: str | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Schedule a recurring cron job.
|
||||
|
||||
Args:
|
||||
cron_expr: Standard cron expression (5 fields: min hour day month weekday)
|
||||
|
||||
Returns the job ID.
|
||||
"""
|
||||
job_id = JobStore.new_id()
|
||||
|
||||
job = ScheduledJob(
|
||||
id=job_id,
|
||||
cron_expr=cron_expr,
|
||||
prompt=prompt,
|
||||
channel_id=channel_id,
|
||||
channel_type=channel_type,
|
||||
thread_id=thread_id,
|
||||
user_name=user_name,
|
||||
created_at=datetime.now(timezone.utc).isoformat(),
|
||||
)
|
||||
|
||||
# Persist
|
||||
self._store.add(job)
|
||||
|
||||
# Register with APScheduler
|
||||
self._register_cron_job(job)
|
||||
|
||||
logger.info(
|
||||
f"🔄 Cron scheduled: '{prompt[:50]}' ({cron_expr}) "
|
||||
f"(id={job_id}, channel={channel_type}/{channel_id})"
|
||||
)
|
||||
return job_id
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Public API: Manage jobs
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
def remove(self, job_id: str) -> bool:
|
||||
"""Remove a job by ID. Returns True if found and removed."""
|
||||
# Remove from APScheduler
|
||||
for prefix in ("once-", "cron-"):
|
||||
try:
|
||||
self._scheduler.remove_job(f"{prefix}{job_id}")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Remove from store
|
||||
return self._store.remove(job_id)
|
||||
|
||||
def list_jobs(self) -> list[ScheduledJob]:
|
||||
"""List all scheduled jobs."""
|
||||
return self._store.list_all()
|
||||
|
||||
def list_recurring(self) -> list[ScheduledJob]:
|
||||
"""List only recurring cron jobs."""
|
||||
return self._store.list_recurring()
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Internal
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
def _register_cron_job(self, job: ScheduledJob) -> None:
|
||||
"""Register a cron job with APScheduler."""
|
||||
if not job.cron_expr:
|
||||
return
|
||||
|
||||
# Parse cron expression (5 fields: min hour day month weekday)
|
||||
parts = job.cron_expr.strip().split()
|
||||
if len(parts) != 5:
|
||||
raise ValueError(
|
||||
f"Invalid cron expression: '{job.cron_expr}' — "
|
||||
"expected 5 fields (minute hour day month weekday)"
|
||||
)
|
||||
|
||||
trigger = CronTrigger(
|
||||
minute=parts[0],
|
||||
hour=parts[1],
|
||||
day=parts[2],
|
||||
month=parts[3],
|
||||
day_of_week=parts[4],
|
||||
)
|
||||
|
||||
self._scheduler.add_job(
|
||||
self._fire_job,
|
||||
trigger=trigger,
|
||||
args=[job.id],
|
||||
id=f"cron-{job.id}",
|
||||
replace_existing=True,
|
||||
)
|
||||
|
||||
def _fire_job(self, job_id: str) -> None:
|
||||
"""Called by APScheduler when a job triggers."""
|
||||
job = self._store.get(job_id)
|
||||
if not job:
|
||||
logger.warning(f"Job {job_id} not found in store — skipping")
|
||||
return
|
||||
|
||||
logger.info(
|
||||
f"🔔 Job fired: {job_id} (cron={job.cron_expr}, "
|
||||
f"prompt='{job.prompt[:50]}')"
|
||||
)
|
||||
|
||||
try:
|
||||
self._callback(job)
|
||||
except Exception as e:
|
||||
logger.error(f"Job callback failed for {job_id}: {e}", exc_info=True)
|
||||
|
||||
# Clean up one-shot jobs after firing
|
||||
if not job.is_recurring:
|
||||
self._store.remove(job_id)
|
||||
167
scheduler/store.py
Normal file
167
scheduler/store.py
Normal file
@@ -0,0 +1,167 @@
|
||||
"""
|
||||
Aetheel Scheduler — SQLite Job Store
|
||||
=====================================
|
||||
Persistent storage for scheduled jobs.
|
||||
|
||||
Schema:
|
||||
jobs(id, cron_expr, prompt, channel_id, channel_type, created_at, next_run)
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sqlite3
|
||||
import uuid
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timezone
|
||||
|
||||
logger = logging.getLogger("aetheel.scheduler.store")
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScheduledJob:
|
||||
"""A persisted scheduled job."""
|
||||
|
||||
id: str
|
||||
cron_expr: str | None # None for one-shot jobs
|
||||
prompt: str
|
||||
channel_id: str
|
||||
channel_type: str # "slack", "telegram", etc.
|
||||
created_at: str
|
||||
next_run: str | None = None
|
||||
thread_id: str | None = None # for threading context
|
||||
user_name: str | None = None # who created the job
|
||||
|
||||
@property
|
||||
def is_recurring(self) -> bool:
|
||||
return self.cron_expr is not None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# SQLite Store
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
CREATE_TABLE_SQL = """
|
||||
CREATE TABLE IF NOT EXISTS jobs (
|
||||
id TEXT PRIMARY KEY,
|
||||
cron_expr TEXT,
|
||||
prompt TEXT NOT NULL,
|
||||
channel_id TEXT NOT NULL,
|
||||
channel_type TEXT NOT NULL DEFAULT 'slack',
|
||||
thread_id TEXT,
|
||||
user_name TEXT,
|
||||
created_at TEXT NOT NULL,
|
||||
next_run TEXT
|
||||
);
|
||||
"""
|
||||
|
||||
|
||||
class JobStore:
|
||||
"""SQLite-backed persistent job store."""
|
||||
|
||||
def __init__(self, db_path: str | None = None):
|
||||
self._db_path = db_path or os.path.join(
|
||||
os.path.expanduser("~/.aetheel"), "scheduler.db"
|
||||
)
|
||||
# Ensure directory exists
|
||||
os.makedirs(os.path.dirname(self._db_path), exist_ok=True)
|
||||
self._init_db()
|
||||
|
||||
def _init_db(self) -> None:
|
||||
"""Initialize the database schema."""
|
||||
with sqlite3.connect(self._db_path) as conn:
|
||||
conn.execute(CREATE_TABLE_SQL)
|
||||
conn.commit()
|
||||
logger.info(f"Scheduler DB initialized: {self._db_path}")
|
||||
|
||||
def _conn(self) -> sqlite3.Connection:
|
||||
conn = sqlite3.connect(self._db_path)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
def add(self, job: ScheduledJob) -> str:
|
||||
"""Add a job to the store. Returns the job ID."""
|
||||
with self._conn() as conn:
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO jobs (id, cron_expr, prompt, channel_id, channel_type,
|
||||
thread_id, user_name, created_at, next_run)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
job.id,
|
||||
job.cron_expr,
|
||||
job.prompt,
|
||||
job.channel_id,
|
||||
job.channel_type,
|
||||
job.thread_id,
|
||||
job.user_name,
|
||||
job.created_at,
|
||||
job.next_run,
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
logger.info(f"Job added: {job.id} (cron={job.cron_expr})")
|
||||
return job.id
|
||||
|
||||
def remove(self, job_id: str) -> bool:
|
||||
"""Remove a job by ID. Returns True if found and removed."""
|
||||
with self._conn() as conn:
|
||||
cursor = conn.execute("DELETE FROM jobs WHERE id = ?", (job_id,))
|
||||
conn.commit()
|
||||
removed = cursor.rowcount > 0
|
||||
if removed:
|
||||
logger.info(f"Job removed: {job_id}")
|
||||
return removed
|
||||
|
||||
def get(self, job_id: str) -> ScheduledJob | None:
|
||||
"""Get a job by ID."""
|
||||
with self._conn() as conn:
|
||||
row = conn.execute(
|
||||
"SELECT * FROM jobs WHERE id = ?", (job_id,)
|
||||
).fetchone()
|
||||
return self._row_to_job(row) if row else None
|
||||
|
||||
def list_all(self) -> list[ScheduledJob]:
|
||||
"""List all jobs."""
|
||||
with self._conn() as conn:
|
||||
rows = conn.execute(
|
||||
"SELECT * FROM jobs ORDER BY created_at"
|
||||
).fetchall()
|
||||
return [self._row_to_job(row) for row in rows]
|
||||
|
||||
def list_recurring(self) -> list[ScheduledJob]:
    """List only recurring (cron) jobs, oldest first.

    A job is recurring when cron_expr is non-NULL. Connection is closed
    explicitly (sqlite3 context manager never closes it).
    """
    conn = self._conn()
    try:
        rows = conn.execute(
            "SELECT * FROM jobs WHERE cron_expr IS NOT NULL ORDER BY created_at"
        ).fetchall()
    finally:
        conn.close()
    return [self._row_to_job(row) for row in rows]
def clear_oneshot(self) -> int:
    """Remove all one-shot (non-cron) jobs. Returns count removed.

    One-shot jobs are rows with a NULL cron_expr. Connection is closed
    explicitly to avoid the sqlite3 context-manager handle leak.
    """
    conn = self._conn()
    try:
        with conn:  # commits the DELETE on success
            removed = conn.execute(
                "DELETE FROM jobs WHERE cron_expr IS NULL"
            ).rowcount
    finally:
        conn.close()
    return removed
@staticmethod
def _row_to_job(row: sqlite3.Row) -> ScheduledJob:
    """Hydrate a ScheduledJob from a DB row.

    Column names map one-to-one onto ScheduledJob's field names, so the
    row can be unpacked directly by name.
    """
    columns = (
        "id",
        "cron_expr",
        "prompt",
        "channel_id",
        "channel_type",
        "thread_id",
        "user_name",
        "created_at",
        "next_run",
    )
    return ScheduledJob(**{name: row[name] for name in columns})
|
||||
@staticmethod
|
||||
def new_id() -> str:
|
||||
"""Generate a short unique job ID."""
|
||||
return uuid.uuid4().hex[:8]
|
||||
6
skills/__init__.py
Normal file
6
skills/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
# Aetheel Skills System
|
||||
# Context-injection skills loaded from workspace.
|
||||
|
||||
from skills.skills import Skill, SkillsManager
|
||||
|
||||
__all__ = ["Skill", "SkillsManager"]
|
||||
270
skills/skills.py
Normal file
270
skills/skills.py
Normal file
@@ -0,0 +1,270 @@
|
||||
"""
|
||||
Aetheel Skills System
|
||||
=====================
|
||||
Discovers, parses, and injects skill context into the system prompt.
|
||||
|
||||
Skills are markdown files in the workspace that teach the agent how to
|
||||
handle specific types of requests. They are loaded once at startup and
|
||||
injected into the system prompt when their trigger words match the
|
||||
user's message.
|
||||
|
||||
Skill format (~/.aetheel/workspace/skills/<name>/SKILL.md):
|
||||
---
|
||||
name: weather
|
||||
description: Check weather for any city
|
||||
triggers: [weather, forecast, temperature, rain]
|
||||
---
|
||||
|
||||
# Weather Skill
|
||||
|
||||
When the user asks about weather, use the following approach:
|
||||
...
|
||||
|
||||
Usage:
|
||||
from skills import SkillsManager
|
||||
|
||||
manager = SkillsManager("~/.aetheel/workspace")
|
||||
manager.load_all()
|
||||
context = manager.get_context("what's the weather like?")
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger("aetheel.skills")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class Skill:
    """In-memory representation of one workspace skill.

    Carries the parsed frontmatter metadata plus the markdown body that
    gets injected into the system prompt when the skill is activated.
    """

    name: str
    description: str
    triggers: list[str]
    body: str  # The markdown body (instructions for the agent)
    path: str  # Absolute path to the SKILL.md file

    def matches(self, text: str) -> bool:
        """Return True if any trigger word occurs in *text*.

        Matching is case-insensitive, plain substring containment —
        note this means "rain" also matches inside "training".
        """
        haystack = text.lower()
        for word in self.triggers:
            if word.lower() in haystack:
                return True
        return False
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Skills Manager
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class SkillsManager:
    """
    Discovers and manages skills from the workspace.

    Skills live in {workspace}/skills/<name>/SKILL.md.
    Each SKILL.md has YAML frontmatter (name, description, triggers)
    and a markdown body with instructions for the agent.
    """

    def __init__(self, workspace_dir: str):
        # "~" is expanded so callers may pass e.g. "~/.aetheel/workspace".
        self._workspace = os.path.expanduser(workspace_dir)
        self._skills_dir = os.path.join(self._workspace, "skills")
        # Populated by load_all(); empty until then.
        self._skills: list[Skill] = []

    @property
    def skills(self) -> list[Skill]:
        """Return all loaded skills (a defensive copy of the internal list)."""
        return list(self._skills)

    @property
    def skills_dir(self) -> str:
        """Return the skills directory path."""
        return self._skills_dir

    def load_all(self) -> list[Skill]:
        """
        Discover and load all skills from the workspace.

        Scans {workspace}/skills/ for subdirectories containing SKILL.md.
        Replaces any previously loaded skills (the cache is cleared first).
        Directory entries are visited in sorted order, so load order is
        deterministic. A skill that fails to parse is logged and skipped;
        it never aborts the whole load.
        Returns the list of loaded skills.
        """
        self._skills = []

        if not os.path.isdir(self._skills_dir):
            # Missing directory is not an error — it just means no skills yet.
            logger.info(
                f"Skills directory not found: {self._skills_dir} — "
                "no skills loaded. Create it to add skills."
            )
            return []

        for entry in sorted(os.listdir(self._skills_dir)):
            skill_dir = os.path.join(self._skills_dir, entry)
            if not os.path.isdir(skill_dir):
                # Stray files directly under skills/ are ignored.
                continue

            skill_file = os.path.join(skill_dir, "SKILL.md")
            if not os.path.isfile(skill_file):
                logger.debug(f"Skipping {entry}/ — no SKILL.md found")
                continue

            try:
                skill = self._parse_skill(skill_file)
                self._skills.append(skill)
                logger.info(
                    f"Loaded skill: {skill.name} "
                    f"(triggers={skill.triggers})"
                )
            except Exception as e:
                # Best-effort load: one malformed skill must not break the rest.
                logger.warning(f"Failed to load skill from {skill_file}: {e}")

        logger.info(f"Loaded {len(self._skills)} skill(s)")
        return list(self._skills)

    def reload(self) -> list[Skill]:
        """Reload all skills (same as load_all, but clears cache first)."""
        return self.load_all()

    def get_relevant(self, message: str) -> list[Skill]:
        """
        Find skills relevant to the given message.
        Matches trigger words against the message text (substring,
        case-insensitive — see Skill.matches).
        """
        return [s for s in self._skills if s.matches(message)]

    def get_context(self, message: str) -> str:
        """
        Build a context string for skills relevant to the message.

        Returns a formatted string ready for injection into the
        system prompt, or empty string if no skills match.
        Each matching skill contributes its full markdown body,
        separated by horizontal rules.
        """
        relevant = self.get_relevant(message)
        if not relevant:
            return ""

        sections = ["# Active Skills\n"]
        for skill in relevant:
            sections.append(
                f"## {skill.name}\n"
                f"*{skill.description}*\n\n"
                f"{skill.body}"
            )

        return "\n\n---\n\n".join(sections)

    def get_all_context(self) -> str:
        """
        Build a context string listing ALL loaded skills (for reference).

        This is a lighter-weight summary — just names and descriptions,
        not full bodies. Used to let the agent know what skills exist.
        Only the first 5 triggers per skill are shown to keep it short.
        """
        if not self._skills:
            return ""

        lines = ["# Available Skills\n"]
        for skill in self._skills:
            triggers = ", ".join(skill.triggers[:5])
            lines.append(f"- **{skill.name}**: {skill.description} (triggers: {triggers})")

        return "\n".join(lines)

    # -------------------------------------------------------------------
    # Parsing
    # -------------------------------------------------------------------

    def _parse_skill(self, path: str) -> Skill:
        """
        Parse a SKILL.md file with YAML frontmatter.

        Format:
            ---
            name: skill_name
            description: What this skill does
            triggers: [word1, word2, word3]
            ---

            # Skill body (markdown instructions)

        Missing fields degrade gracefully: name falls back to the skill's
        directory name, description to "", and triggers to [name].
        """
        with open(path, "r", encoding="utf-8") as f:
            content = f.read()

        # Parse YAML frontmatter (simple regex-based, no PyYAML needed)
        frontmatter, body = self._split_frontmatter(content)

        name = self._extract_field(frontmatter, "name") or Path(path).parent.name
        description = self._extract_field(frontmatter, "description") or ""
        triggers_raw = self._extract_field(frontmatter, "triggers") or ""
        triggers = self._parse_list(triggers_raw)

        if not triggers:
            # Fall back to using the skill name as a trigger
            triggers = [name]

        return Skill(
            name=name,
            description=description,
            triggers=triggers,
            body=body.strip(),
            path=path,
        )

    @staticmethod
    def _split_frontmatter(content: str) -> tuple[str, str]:
        """Split YAML frontmatter from markdown body.

        Returns ("", content) unchanged when no frontmatter block is found.
        """
        # Match --- at start, then content, then ---
        match = re.match(
            r"^---\s*\n(.*?)\n---\s*\n(.*)",
            content,
            re.DOTALL,
        )
        if match:
            return match.group(1), match.group(2)
        return "", content

    @staticmethod
    def _extract_field(frontmatter: str, field_name: str) -> str | None:
        """Extract a simple one-line field value from YAML frontmatter.

        Returns None when the field is absent. Only single-line scalar
        values are supported (no nested or multi-line YAML).
        """
        pattern = rf"^{re.escape(field_name)}\s*:\s*(.+)$"
        match = re.search(pattern, frontmatter, re.MULTILINE)
        if match:
            value = match.group(1).strip()
            # Remove surrounding quotes if present
            if (value.startswith('"') and value.endswith('"')) or (
                value.startswith("'") and value.endswith("'")
            ):
                value = value[1:-1]
            return value
        return None

    @staticmethod
    def _parse_list(raw: str) -> list[str]:
        """
        Parse a YAML-like list string into a Python list.

        Handles both:
        - Inline: [word1, word2, word3]
        - Comma-separated: word1, word2, word3

        Items are stripped of whitespace and surrounding quotes; empty
        items are dropped. Returns [] for empty input.
        """
        if not raw:
            return []

        # Remove brackets if present
        raw = raw.strip()
        if raw.startswith("[") and raw.endswith("]"):
            raw = raw[1:-1]

        # Split by commas and clean up
        items = []
        for item in raw.split(","):
            cleaned = item.strip().strip("'\"")
            if cleaned:
                items.append(cleaned)

        return items
|
||||
7
stock_update.sh
Executable file
7
stock_update.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/bin/bash
# Stock market update script
# Appends a timestamped marker block to the update log each time it runs.
# Note: This script logs when it runs. Run opencode and search manually for actual data.
LOG_FILE=/Users/tanmay-air/Desktop/aetheel/stock_updates.log
{
    echo "=== Stock Market Update - $(date) ==="
    echo "Searching for top stocks..."
    echo "Check completed at $(date)"
    echo ""
} >> "$LOG_FILE"
1
tests/__init__.py
Normal file
1
tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Test package
|
||||
200
tests/test_base_adapter.py
Normal file
200
tests/test_base_adapter.py
Normal file
@@ -0,0 +1,200 @@
|
||||
"""
|
||||
Tests for the Base Adapter and IncomingMessage.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from adapters.base import BaseAdapter, IncomingMessage
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Concrete adapter for testing (implements all abstract methods)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class MockAdapter(BaseAdapter):
    """A minimal concrete adapter for testing BaseAdapter.

    Instead of talking to a real platform, it records every outgoing
    message in `sent_messages` so tests can assert on what was "sent".
    """

    def __init__(self):
        super().__init__()
        # Each entry: {"channel_id": ..., "text": ..., "thread_id": ...}
        self.sent_messages: list[dict] = []
        # Toggled by start()/start_async()/stop() for lifecycle assertions.
        self._started = False

    @property
    def source_name(self) -> str:
        # Identifies this adapter's platform; "mock" for tests.
        return "mock"

    def start(self) -> None:
        self._started = True

    def start_async(self) -> None:
        self._started = True

    def stop(self) -> None:
        self._started = False

    def send_message(
        self,
        channel_id: str,
        text: str,
        thread_id: str | None = None,
    ) -> None:
        # Capture instead of actually sending anywhere.
        self.sent_messages.append({
            "channel_id": channel_id,
            "text": text,
            "thread_id": thread_id,
        })
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.fixture
def adapter():
    """A fresh MockAdapter instance for each test."""
    mock = MockAdapter()
    return mock
||||
|
||||
|
||||
@pytest.fixture
def sample_message():
    """A representative non-DM channel message with a thread in raw_event."""
    return IncomingMessage(
        text="Hello world",
        user_id="U123",
        user_name="testuser",
        channel_id="C456",
        channel_name="general",
        conversation_id="conv-789",
        source="mock",
        is_dm=False,
        raw_event={"thread_id": "T100"},
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: IncomingMessage
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestIncomingMessage:
    """Unit tests for the IncomingMessage dataclass."""

    def test_create_message(self, sample_message):
        """Explicitly-passed fields are stored as given."""
        assert sample_message.text == "Hello world"
        assert sample_message.user_id == "U123"
        assert sample_message.user_name == "testuser"
        assert sample_message.channel_id == "C456"
        assert sample_message.source == "mock"
        assert sample_message.is_dm is False

    def test_timestamp_default(self):
        """Omitting timestamp yields a timezone-aware default."""
        msg = IncomingMessage(
            text="test",
            user_id="U1",
            user_name="user",
            channel_id="C1",
            channel_name="ch",
            conversation_id="conv",
            source="test",
            is_dm=True,
        )
        assert msg.timestamp is not None
        assert msg.timestamp.tzinfo is not None  # has timezone

    def test_raw_event_default(self):
        """Omitting raw_event defaults it to an empty dict."""
        msg = IncomingMessage(
            text="test",
            user_id="U1",
            user_name="user",
            channel_id="C1",
            channel_name="ch",
            conversation_id="conv",
            source="test",
            is_dm=False,
        )
        assert msg.raw_event == {}
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: BaseAdapter
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBaseAdapter:
    """Unit tests for BaseAdapter's handler registration and dispatch."""

    def test_register_handler(self, adapter):
        """on_message() adds the handler to the internal registry."""
        handler = lambda msg: "response"
        adapter.on_message(handler)
        assert len(adapter._message_handlers) == 1

    def test_on_message_as_decorator(self, adapter):
        """on_message works as a decorator and leaves the function callable."""
        @adapter.on_message
        def my_handler(msg):
            return "decorated response"

        assert len(adapter._message_handlers) == 1
        assert my_handler("test") == "decorated response"

    def test_dispatch_calls_handler(self, adapter, sample_message):
        """_dispatch passes the incoming message to registered handlers."""
        responses = []

        @adapter.on_message
        def handler(msg):
            responses.append(msg.text)
            return f"reply to: {msg.text}"

        adapter._dispatch(sample_message)
        assert responses == ["Hello world"]

    def test_dispatch_sends_response(self, adapter, sample_message):
        """A handler's return value is sent back to the originating channel."""
        @adapter.on_message
        def handler(msg):
            return "Auto reply"

        adapter._dispatch(sample_message)
        assert len(adapter.sent_messages) == 1
        assert adapter.sent_messages[0]["text"] == "Auto reply"
        assert adapter.sent_messages[0]["channel_id"] == "C456"

    def test_dispatch_no_response(self, adapter, sample_message):
        """A handler returning None produces no outgoing message."""
        @adapter.on_message
        def handler(msg):
            return None  # explicit no response

        adapter._dispatch(sample_message)
        assert len(adapter.sent_messages) == 0

    def test_dispatch_handler_error(self, adapter, sample_message):
        """Handler exceptions are swallowed and surfaced as an error reply."""
        @adapter.on_message
        def bad_handler(msg):
            raise ValueError("Something broke")

        # Should not raise — dispatch catches errors
        adapter._dispatch(sample_message)
        # Should send error message
        assert len(adapter.sent_messages) == 1
        assert "Something went wrong" in adapter.sent_messages[0]["text"]

    def test_multiple_handlers(self, adapter, sample_message):
        """Handlers run in registration order; only non-None replies are sent."""
        calls = []

        @adapter.on_message
        def handler1(msg):
            calls.append("h1")
            return None

        @adapter.on_message
        def handler2(msg):
            calls.append("h2")
            return "from h2"

        adapter._dispatch(sample_message)
        assert calls == ["h1", "h2"]
        assert len(adapter.sent_messages) == 1  # only h2 returned a response

    def test_source_name(self, adapter):
        """source_name reports the concrete adapter's platform tag."""
        assert adapter.source_name == "mock"

    def test_start_stop(self, adapter):
        """start()/stop() toggle the adapter's running flag."""
        adapter.start()
        assert adapter._started is True
        adapter.stop()
        assert adapter._started is False
|
||||
140
tests/test_scheduler.py
Normal file
140
tests/test_scheduler.py
Normal file
@@ -0,0 +1,140 @@
|
||||
"""
|
||||
Tests for the Scheduler Store (SQLite persistence).
|
||||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import pytest
|
||||
|
||||
from scheduler.store import JobStore, ScheduledJob
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.fixture
def db_path(tmp_path):
    """Path to a throwaway SQLite database inside pytest's temp dir."""
    return str(tmp_path.joinpath("test_scheduler.db"))
||||
|
||||
|
||||
@pytest.fixture
def store(db_path):
    """A JobStore backed by a fresh temp database."""
    job_store = JobStore(db_path=db_path)
    return job_store
||||
|
||||
|
||||
def _make_job(
    cron_expr: str | None = "*/5 * * * *",
    prompt: str = "Test prompt",
    channel_id: str = "C123",
    channel_type: str = "slack",
) -> ScheduledJob:
    """Helper to create a ScheduledJob.

    Defaults to a recurring (cron) Slack job; pass cron_expr=None for a
    one-shot job. The id is freshly generated and created_at is the
    current UTC time in ISO format.
    """
    return ScheduledJob(
        id=JobStore.new_id(),
        cron_expr=cron_expr,
        prompt=prompt,
        channel_id=channel_id,
        channel_type=channel_type,
        created_at=datetime.now(timezone.utc).isoformat(),
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: JobStore
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestJobStore:
    """Unit tests for JobStore's SQLite-backed CRUD operations."""

    def test_add_and_get(self, store):
        """A stored job round-trips through add() and get()."""
        job = _make_job(prompt="Hello scheduler")
        store.add(job)
        retrieved = store.get(job.id)
        assert retrieved is not None
        assert retrieved.id == job.id
        assert retrieved.prompt == "Hello scheduler"
        assert retrieved.cron_expr == "*/5 * * * *"

    def test_remove(self, store):
        """remove() deletes the row and reports success."""
        job = _make_job()
        store.add(job)
        assert store.remove(job.id) is True
        assert store.get(job.id) is None

    def test_remove_nonexistent(self, store):
        """Removing an unknown ID returns False, not an error."""
        assert store.remove("nonexistent") is False

    def test_list_all(self, store):
        """list_all() returns every stored job."""
        job1 = _make_job(prompt="Job 1")
        job2 = _make_job(prompt="Job 2")
        store.add(job1)
        store.add(job2)
        all_jobs = store.list_all()
        assert len(all_jobs) == 2

    def test_list_recurring(self, store):
        """list_recurring() excludes one-shot (cron_expr=None) jobs."""
        cron_job = _make_job(cron_expr="0 9 * * *", prompt="Recurring")
        oneshot_job = _make_job(cron_expr=None, prompt="One-shot")
        store.add(cron_job)
        store.add(oneshot_job)
        recurring = store.list_recurring()
        assert len(recurring) == 1
        assert recurring[0].prompt == "Recurring"

    def test_clear_oneshot(self, store):
        """clear_oneshot() deletes only non-cron jobs and returns the count."""
        cron_job = _make_job(cron_expr="0 9 * * *")
        oneshot1 = _make_job(cron_expr=None, prompt="OS 1")
        oneshot2 = _make_job(cron_expr=None, prompt="OS 2")
        store.add(cron_job)
        store.add(oneshot1)
        store.add(oneshot2)
        removed = store.clear_oneshot()
        assert removed == 2
        remaining = store.list_all()
        assert len(remaining) == 1
        assert remaining[0].is_recurring

    def test_is_recurring(self):
        """is_recurring reflects whether cron_expr is set."""
        cron = _make_job(cron_expr="*/5 * * * *")
        oneshot = _make_job(cron_expr=None)
        assert cron.is_recurring is True
        assert oneshot.is_recurring is False

    def test_persistence(self, db_path):
        """Jobs survive store re-creation."""
        store1 = JobStore(db_path=db_path)
        job = _make_job(prompt="Persistent job")
        store1.add(job)

        # Create a new store instance pointing to same DB
        store2 = JobStore(db_path=db_path)
        retrieved = store2.get(job.id)
        assert retrieved is not None
        assert retrieved.prompt == "Persistent job"

    def test_new_id_unique(self):
        """new_id() produces distinct IDs across many calls."""
        ids = {JobStore.new_id() for _ in range(100)}
        assert len(ids) == 100  # all unique

    def test_job_with_metadata(self, store):
        """Optional thread_id/user_name fields round-trip through the DB."""
        job = _make_job()
        job.thread_id = "T456"
        job.user_name = "Test User"
        store.add(job)
        retrieved = store.get(job.id)
        assert retrieved is not None
        assert retrieved.thread_id == "T456"
        assert retrieved.user_name == "Test User"

    def test_telegram_channel_type(self, store):
        """channel_type/channel_id are stored verbatim for non-Slack channels."""
        job = _make_job(channel_type="telegram", channel_id="987654")
        store.add(job)
        retrieved = store.get(job.id)
        assert retrieved is not None
        assert retrieved.channel_type == "telegram"
        assert retrieved.channel_id == "987654"
||||
272
tests/test_skills.py
Normal file
272
tests/test_skills.py
Normal file
@@ -0,0 +1,272 @@
|
||||
"""
|
||||
Tests for the Skills System.
|
||||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import pytest
|
||||
|
||||
from skills.skills import Skill, SkillsManager
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.fixture
def skills_workspace(tmp_path):
    """Create a temp workspace with sample skills.

    Layout produced under {tmp_path}/skills/:
      weather/SKILL.md     — full frontmatter with triggers
      coding/SKILL.md      — full frontmatter with triggers
      empty_skill/         — directory with no SKILL.md (must be skipped)
      minimal/SKILL.md     — frontmatter without a triggers list
    """
    skills_dir = tmp_path / "skills"

    # Skill 1: weather
    weather_dir = skills_dir / "weather"
    weather_dir.mkdir(parents=True)
    (weather_dir / "SKILL.md").write_text(
        """---
name: weather
description: Check weather for any city
triggers: [weather, forecast, temperature, rain]
---

# Weather Skill

When the user asks about weather:
1. Extract the city name
2. Provide info based on your knowledge
"""
    )

    # Skill 2: coding
    coding_dir = skills_dir / "coding"
    coding_dir.mkdir(parents=True)
    (coding_dir / "SKILL.md").write_text(
        """---
name: coding
description: Help with programming tasks
triggers: [code, python, javascript, bug, debug]
---

# Coding Skill

When the user asks about coding:
- Ask what language they're working in
- Provide code snippets with explanations
"""
    )

    # Skill 3: invalid (no SKILL.md)
    invalid_dir = skills_dir / "empty_skill"
    invalid_dir.mkdir(parents=True)
    # No SKILL.md here

    # Skill 4: minimal (no explicit triggers)
    minimal_dir = skills_dir / "minimal"
    minimal_dir.mkdir(parents=True)
    (minimal_dir / "SKILL.md").write_text(
        """---
name: minimal_skill
description: A minimal skill
---

This is a minimal skill with no explicit triggers.
"""
    )

    return tmp_path
||||
|
||||
|
||||
@pytest.fixture
def empty_workspace(tmp_path):
    """Create a temp workspace with no skills directory at all."""
    return tmp_path
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: Skill Dataclass
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSkill:
    """Unit tests for Skill.matches trigger matching."""

    def test_matches_trigger(self):
        """A message matches when it contains any trigger word."""
        skill = Skill(
            name="weather",
            description="Weather skill",
            triggers=["weather", "forecast"],
            body="# Weather",
            path="/fake/path",
        )
        assert skill.matches("What's the weather today?")
        assert skill.matches("Give me the FORECAST")
        assert not skill.matches("Tell me a joke")

    def test_matches_is_case_insensitive(self):
        """Trigger matching ignores case on both sides."""
        skill = Skill(
            name="test",
            description="Test",
            triggers=["Python"],
            body="body",
            path="/fake",
        )
        assert skill.matches("I'm learning python")
        assert skill.matches("PYTHON is great")
        assert skill.matches("Python 3.14")
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: SkillsManager Loading
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSkillsManagerLoading:
    """Tests for SkillsManager discovery and SKILL.md parsing."""

    def test_load_all_finds_skills(self, skills_workspace):
        """Only directories that contain a SKILL.md are loaded."""
        manager = SkillsManager(str(skills_workspace))
        loaded = manager.load_all()
        # Should find weather, coding, minimal (not empty_skill which has no SKILL.md)
        assert len(loaded) == 3
        names = {s.name for s in loaded}
        assert "weather" in names
        assert "coding" in names
        assert "minimal_skill" in names

    def test_load_parses_frontmatter(self, skills_workspace):
        """Frontmatter fields (description, triggers) are parsed correctly."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        weather = next(s for s in manager.skills if s.name == "weather")
        assert weather.description == "Check weather for any city"
        assert "weather" in weather.triggers
        assert "forecast" in weather.triggers
        assert "temperature" in weather.triggers
        assert "rain" in weather.triggers

    def test_load_parses_body(self, skills_workspace):
        """The markdown body after the frontmatter is preserved."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        weather = next(s for s in manager.skills if s.name == "weather")
        assert "# Weather Skill" in weather.body
        assert "Extract the city name" in weather.body

    def test_empty_workspace(self, empty_workspace):
        """A workspace without a skills/ directory loads nothing."""
        manager = SkillsManager(str(empty_workspace))
        loaded = manager.load_all()
        assert loaded == []

    def test_minimal_skill_uses_name_as_trigger(self, skills_workspace):
        """Missing triggers fall back to the skill's own name."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        minimal = next(s for s in manager.skills if s.name == "minimal_skill")
        assert minimal.triggers == ["minimal_skill"]

    def test_reload_rediscovers(self, skills_workspace):
        """reload() picks up skills added after the initial load."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        assert len(manager.skills) == 3

        # Add a new skill
        new_dir = skills_workspace / "skills" / "newskill"
        new_dir.mkdir()
        (new_dir / "SKILL.md").write_text(
            "---\nname: newskill\ndescription: New\ntriggers: [new]\n---\nNew body"
        )

        reloaded = manager.reload()
        assert len(reloaded) == 4
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: SkillsManager Matching
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSkillsManagerMatching:
    """Tests for SkillsManager.get_relevant trigger matching."""

    def test_get_relevant_finds_matching(self, skills_workspace):
        """Only skills whose triggers appear in the message are returned."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        relevant = manager.get_relevant("What's the weather?")
        names = {s.name for s in relevant}
        assert "weather" in names
        assert "coding" not in names

    def test_get_relevant_multiple_matches(self, skills_workspace):
        """A message can activate several skills at once."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        # This should match both weather (temperature) and coding (debug)
        relevant = manager.get_relevant("debug the temperature sensor code")
        names = {s.name for s in relevant}
        assert "weather" in names
        assert "coding" in names

    def test_get_relevant_no_matches(self, skills_workspace):
        """A message with no trigger words yields an empty list."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        relevant = manager.get_relevant("Tell me a joke about cats")
        assert relevant == []
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: Context Generation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSkillsManagerContext:
    """Tests for system-prompt context generation."""

    def test_get_context_with_matching(self, skills_workspace):
        """Matching skills contribute their bodies under '# Active Skills'."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        context = manager.get_context("forecast for tomorrow")
        assert "# Active Skills" in context
        assert "Weather Skill" in context

    def test_get_context_empty_when_no_match(self, skills_workspace):
        """No matching skills means an empty context string."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        context = manager.get_context("random unrelated message")
        assert context == ""

    def test_get_all_context(self, skills_workspace):
        """get_all_context lists every loaded skill by name."""
        manager = SkillsManager(str(skills_workspace))
        manager.load_all()
        summary = manager.get_all_context()
        assert "# Available Skills" in summary
        assert "weather" in summary
        assert "coding" in summary

    def test_get_all_context_empty_workspace(self, empty_workspace):
        """An empty workspace produces an empty summary."""
        manager = SkillsManager(str(empty_workspace))
        manager.load_all()
        assert manager.get_all_context() == ""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests: Frontmatter Parsing Edge Cases
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestFrontmatterParsing:
    """Edge-case tests for the static frontmatter/list parsing helpers."""

    def test_parse_list_inline_brackets(self):
        """Inline YAML-style list with brackets."""
        result = SkillsManager._parse_list("[a, b, c]")
        assert result == ["a", "b", "c"]

    def test_parse_list_no_brackets(self):
        """Bare comma-separated values also parse."""
        result = SkillsManager._parse_list("a, b, c")
        assert result == ["a", "b", "c"]

    def test_parse_list_quoted_items(self):
        """Single and double quotes are stripped from items."""
        result = SkillsManager._parse_list("['hello', \"world\"]")
        assert result == ["hello", "world"]

    def test_parse_list_empty(self):
        """Empty input yields an empty list."""
        result = SkillsManager._parse_list("")
        assert result == []

    def test_split_frontmatter(self):
        """Frontmatter between --- markers is separated from the body."""
        content = "---\nname: test\n---\nBody here"
        fm, body = SkillsManager._split_frontmatter(content)
        assert "name: test" in fm
        assert body == "Body here"

    def test_split_no_frontmatter(self):
        """Content without --- markers is returned unchanged as the body."""
        content = "Just a body with no frontmatter"
        fm, body = SkillsManager._split_frontmatter(content)
        assert fm == ""
        assert body == content
|
||||
105
uv.lock
generated
105
uv.lock
generated
@@ -7,21 +7,34 @@ name = "aetheel"
|
||||
version = "0.1.0"
|
||||
source = { virtual = "." }
|
||||
dependencies = [
|
||||
{ name = "apscheduler" },
|
||||
{ name = "fastembed" },
|
||||
{ name = "python-dotenv" },
|
||||
{ name = "python-telegram-bot" },
|
||||
{ name = "slack-bolt" },
|
||||
{ name = "slack-sdk" },
|
||||
{ name = "watchdog" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
test = [
|
||||
{ name = "pytest" },
|
||||
{ name = "pytest-asyncio" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "apscheduler", specifier = ">=3.10.0,<4.0.0" },
|
||||
{ name = "fastembed", specifier = ">=0.7.4" },
|
||||
{ name = "pytest", marker = "extra == 'test'", specifier = ">=8.0" },
|
||||
{ name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.24" },
|
||||
{ name = "python-dotenv", specifier = ">=1.2.1,<2.0.0" },
|
||||
{ name = "python-telegram-bot", specifier = ">=21.0" },
|
||||
{ name = "slack-bolt", specifier = ">=1.27.0,<2.0.0" },
|
||||
{ name = "slack-sdk", specifier = ">=3.40.0,<4.0.0" },
|
||||
{ name = "watchdog", specifier = ">=6.0.0" },
|
||||
]
|
||||
provides-extras = ["test"]
|
||||
|
||||
[[package]]
|
||||
name = "annotated-doc"
|
||||
@@ -44,6 +57,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "apscheduler"
|
||||
version = "3.11.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "tzlocal" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/07/12/3e4389e5920b4c1763390c6d371162f3784f86f85cd6d6c1bfe68eef14e2/apscheduler-3.11.2.tar.gz", hash = "sha256:2a9966b052ec805f020c8c4c3ae6e6a06e24b1bf19f2e11d91d8cca0473eef41", size = 108683, upload-time = "2025-12-22T00:39:34.884Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/64/2e54428beba8d9992aa478bb8f6de9e4ecaa5f8f513bcfd567ed7fb0262d/apscheduler-3.11.2-py3-none-any.whl", hash = "sha256:ce005177f741409db4e4dd40a7431b76feb856b9dd69d57e0da49d6715bfd26d", size = 64439, upload-time = "2025-12-22T00:39:33.303Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "certifi"
|
||||
version = "2026.1.4"
|
||||
@@ -235,6 +260,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "iniconfig"
|
||||
version = "2.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "loguru"
|
||||
version = "0.7.3"
|
||||
@@ -409,6 +443,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.6.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "6.33.5"
|
||||
@@ -439,6 +482,34 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
version = "9.0.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
{ name = "iniconfig" },
|
||||
{ name = "packaging" },
|
||||
{ name = "pluggy" },
|
||||
{ name = "pygments" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-asyncio"
|
||||
version = "1.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pytest" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "python-dotenv"
|
||||
version = "1.2.1"
|
||||
@@ -448,6 +519,19 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "python-telegram-bot"
|
||||
version = "22.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpcore" },
|
||||
{ name = "httpx" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/cd/9b/8df90c85404166a6631e857027866263adb27440d8af1dbeffbdc4f0166c/python_telegram_bot-22.6.tar.gz", hash = "sha256:50ae8cc10f8dff01445628687951020721f37956966b92a91df4c1bf2d113742", size = 1503761, upload-time = "2026-01-24T13:57:00.269Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/13/97/7298f0e1afe3a1ae52ff4c5af5087ed4de319ea73eb3b5c8c4dd4e76e708/python_telegram_bot-22.6-py3-none-any.whl", hash = "sha256:e598fe171c3dde2dfd0f001619ee9110eece66761a677b34719fb18934935ce0", size = 737267, upload-time = "2026-01-24T13:56:58.06Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyyaml"
|
||||
version = "6.0.3"
|
||||
@@ -618,6 +702,27 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tzdata"
|
||||
version = "2025.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5e/a7/c202b344c5ca7daf398f3b8a477eeb205cf3b6f32e7ec3a6bac0629ca975/tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7", size = 196772, upload-time = "2025-12-13T17:45:35.667Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tzlocal"
|
||||
version = "5.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "tzdata", marker = "sys_platform == 'win32'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.6.3"
|
||||
|
||||
Reference in New Issue
Block a user