feat: openclaw-style secrets (env.vars + `${VAR}`) and per-task model routing

- Replace python-dotenv with config.json env.vars block + `${VAR}` substitution
- Add models section for per-task model routing (heartbeat, subagent, default)
- Heartbeat/subagent tasks can use different models/providers than main chat
- Remove python-dotenv from dependencies
- Update all docs to reflect new config approach
- Reorganize docs into project/ and research/ subdirectories
This commit is contained in:
2026-02-20 23:49:05 -05:00
parent 55c6767e69
commit 82c2640481
35 changed files with 2904 additions and 422 deletions

View File

@@ -153,6 +153,8 @@ class OpenCodeConfig:
system_prompt: str | None = None
workspace_dir: str | None = None
format: str = "json" # output format: "default" (formatted) or "json" (raw events)
agent: str | None = None # OpenCode agent name (from `opencode agent list`)
attach_url: str | None = None # Attach to running server to avoid MCP cold boot
# Session
auto_create_sessions: bool = True
@@ -183,6 +185,8 @@ class OpenCodeConfig:
os.environ.get("AETHEEL_WORKSPACE"),
),
format=os.environ.get("OPENCODE_FORMAT", "json"),
agent=os.environ.get("OPENCODE_AGENT"),
attach_url=os.environ.get("OPENCODE_ATTACH"),
)
@@ -487,23 +491,20 @@ class OpenCodeRuntime:
message: str,
conversation_id: str | None = None,
system_prompt: str | None = None,
files: list[str] | None = None,
fork: bool = False,
title: str | None = None,
) -> AgentResponse:
"""
Send a message to the AI agent and get a response.
This is the main entry point, used by the Slack adapter's message handler.
If a live session exists for this conversation_id, the message is sent
as a follow-up to the existing session (IPC streaming). Otherwise a
new session is created.
Args:
message: The user's message text
conversation_id: External conversation ID (e.g., Slack thread_ts)
for session isolation
conversation_id: External conversation ID for session isolation
system_prompt: Optional per-request system prompt override
Returns:
AgentResponse with the AI's reply
files: Optional file paths to attach (images, docs, etc.)
fork: Fork the session instead of continuing linearly
title: Human-readable session title
"""
started = time.time()
@@ -513,8 +514,6 @@ class OpenCodeRuntime:
)
try:
# Check for an active live session — if one exists, this is a
# follow-up message that should continue the same agent context
if conversation_id:
live = self._live_sessions.get(conversation_id)
if live and live.session_id:
@@ -525,15 +524,16 @@ class OpenCodeRuntime:
live.touch()
live.message_count += 1
# Route to the appropriate mode
if self._config.mode == RuntimeMode.SDK and self._sdk_available:
result = self._chat_sdk(message, conversation_id, system_prompt)
else:
result = self._chat_cli(message, conversation_id, system_prompt)
result = self._chat_cli(
message, conversation_id, system_prompt,
files=files, fork=fork, title=title,
)
result.duration_ms = int((time.time() - started) * 1000)
# Track the live session
if conversation_id and result.session_id:
live = self._live_sessions.get_or_create(conversation_id)
live.session_id = result.session_id
@@ -622,9 +622,11 @@ class OpenCodeRuntime:
"mode": self._config.mode.value,
"model": self._config.model or "default",
"provider": self._config.provider or "auto",
"agent": self._config.agent or "default",
"active_sessions": self._sessions.count,
"live_sessions": len(self._live_sessions.list_active()),
"opencode_available": self._is_opencode_available(),
"attach": self._config.attach_url or "none",
}
if self._config.mode == RuntimeMode.SDK:
@@ -633,6 +635,78 @@ class OpenCodeRuntime:
return status
def list_models(self, provider: str | None = None, verbose: bool = False) -> str:
    """
    List the models the OpenCode CLI knows about.

    Shells out to `opencode models [provider] [--verbose]` and returns
    whatever the CLI printed (stdout preferred, stderr as a fallback).

    Args:
        provider: Optional provider name to narrow the listing.
        verbose: Forward --verbose to the CLI for extra detail.

    Returns:
        The raw CLI output, or a short user-facing notice on failure.
    """
    cmd = [self._config.command, "models"]
    if provider:
        cmd.append(provider)
    if verbose:
        cmd.append("--verbose")
    try:
        proc = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=30,
            env=self._build_cli_env(),
        )
    except subprocess.TimeoutExpired:
        return "⚠️ Timed out fetching models."
    except FileNotFoundError:
        return "⚠️ OpenCode CLI not found."
    # Prefer stdout; fall back to stderr if the CLI wrote there instead.
    listing = proc.stdout.strip() or proc.stderr.strip()
    return listing if listing else "No models found."
def get_stats(self, days: int | None = None) -> str:
    """
    Fetch token-usage and cost statistics via `opencode stats`.

    Args:
        days: Optional reporting window in days; forwarded as --days
            when truthy (0/None means no window flag is passed).

    Returns:
        The raw CLI output, or a short user-facing notice on failure.
    """
    cmd = [self._config.command, "stats"]
    if days:
        cmd += ["--days", str(days)]
    try:
        proc = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=15,
            env=self._build_cli_env(),
        )
    except subprocess.TimeoutExpired:
        return "⚠️ Timed out fetching stats."
    except FileNotFoundError:
        return "⚠️ OpenCode CLI not found."
    # stdout first; some CLI errors surface only on stderr.
    printed = proc.stdout.strip() or proc.stderr.strip()
    return printed or "No stats available."
def list_agents(self) -> str:
    """
    List the agents configured for OpenCode via `opencode agent list`.

    Returns:
        The raw CLI output, or a short user-facing notice on failure.
    """
    try:
        proc = subprocess.run(
            [self._config.command, "agent", "list"],
            capture_output=True,
            text=True,
            timeout=15,
            env=self._build_cli_env(),
        )
    except subprocess.TimeoutExpired:
        return "⚠️ Timed out listing agents."
    except FileNotFoundError:
        return "⚠️ OpenCode CLI not found."
    # Take stdout when present, otherwise whatever landed on stderr.
    report = proc.stdout.strip() or proc.stderr.strip()
    return report or "No agents found."
def cleanup_sessions(self) -> int:
    """
    Remove stale conversation sessions from the session store.

    Delegates to the store's cleanup with the configured TTL in hours;
    presumably sessions idle longer than that are evicted — the exact
    staleness criterion lives in the store implementation (confirm there).

    Returns:
        Number of sessions removed.
    """
    return self._sessions.cleanup(self._config.session_ttl_hours)
@@ -647,18 +721,17 @@ class OpenCodeRuntime:
message: str,
conversation_id: str | None = None,
system_prompt: str | None = None,
files: list[str] | None = None,
fork: bool = False,
title: str | None = None,
) -> AgentResponse:
"""
Run OpenCode in CLI mode via `opencode run`.
This mirrors OpenClaw's cli-runner.ts:
1. Build the CLI args (like buildCliArgs)
2. Run the command with a timeout
3. Parse the output (like parseCliJson)
4. Return structured results
"""
# Build CLI args — modeled after OpenClaw's buildCliArgs()
args = self._build_cli_args(message, conversation_id, system_prompt)
args = self._build_cli_args(
message, conversation_id, system_prompt,
files=files, fork=fork, title=title,
)
logger.info(
f"CLI exec: {self._config.command} run "
@@ -726,17 +799,15 @@ class OpenCodeRuntime:
message: str,
conversation_id: str | None = None,
system_prompt: str | None = None,
files: list[str] | None = None,
fork: bool = False,
title: str | None = None,
) -> list[str]:
"""
Build CLI arguments for `opencode run`.
Modeled after OpenClaw's buildCliArgs() in cli-runner/helpers.ts:
- base args (command + run)
- model arg (--model)
- session arg (--session / --continue)
- system prompt (prepended to message as XML block)
- format arg (--format)
- the prompt itself
Supports: --model, --session, --continue, --format, --agent,
--attach, --file, --fork, --title
"""
args = [self._config.command, "run"]
@@ -744,24 +815,38 @@ class OpenCodeRuntime:
if self._config.model:
args.extend(["--model", self._config.model])
# Session continuity — like OpenClaw's sessionArg
# Agent selection
if self._config.agent:
args.extend(["--agent", self._config.agent])
# Attach to running server (avoids MCP cold boot per request)
if self._config.attach_url:
args.extend(["--attach", self._config.attach_url])
# Session continuity
existing_session = None
if conversation_id:
existing_session = self._sessions.get(conversation_id)
if existing_session:
# Continue an existing session
args.extend(["--continue", "--session", existing_session])
# For new conversations, OpenCode creates a new session automatically
if fork:
args.append("--fork")
# Output format — use JSON for structured parsing, default for plain text
# Valid choices: "default" (formatted), "json" (raw JSON events)
# Session title
if title:
args.extend(["--title", title])
# File attachments
if files:
for f in files:
args.extend(["--file", f])
# Output format
if self._config.format and self._config.format in ("default", "json"):
args.extend(["--format", self._config.format])
# Build the full prompt — prepend system prompt if provided
# opencode run doesn't have a --system-prompt flag, so we inject it
# as an XML-tagged block before the user message
if system_prompt:
full_message = (
f"<system_instructions>\n{system_prompt}\n</system_instructions>\n\n"
@@ -770,7 +855,6 @@ class OpenCodeRuntime:
else:
full_message = message
# The prompt message (must come last as a positional arg)
args.append(full_message)
return args