Files
Aetheel/test_all.sh
tanmay11k 6d73f74e0b feat: config-driven architecture, install wizard, live runtime switching, usage tracking, auto-failover
Major changes:
- Config-driven adapters: all channels (Slack, Discord, Telegram, WebChat, Webhooks) controlled via config.json with enabled flags and token auto-detection, no CLI flags required
- Runtime engine field: runtime.engine selects opencode/claude from config
- Interactive install script: 8-phase setup wizard with AI runtime detection/installation, token setup, identity file personalization (personality presets), aetheel CLI command, background service (launchd/systemd)
- Live runtime switching: /engine, /model, /provider commands hot-swap the AI runtime from chat without restart, changes persisted to config.json
- Usage tracking: per-request cost extraction from Claude Code JSON output, cumulative stats via /usage command
- Auto-failover: rate limit detection on both runtimes, automatic switch to other engine on quota errors with user notification
- Chat commands work without / prefix (Slack intercepts / in channels), commands: engine, model, provider, config, usage, reload, cron, subagents, status, help
- /config set for editing config.json from chat with dotted key notation
- Security audit saved to docs/security-audit.md
- Full command reference in docs/commands.md
- Future changes doc with NanoClaw agent teams analysis
- Logo added to README and WebChat UI
- README fully rewritten with all features documented
2026-02-18 01:07:12 -05:00

425 lines
13 KiB
Bash

#!/usr/bin/env bash
# =============================================================================
# Aetheel — Full Feature Test Script
# =============================================================================
# Runs all tests and smoke checks for every Aetheel feature.
#
# Usage:
# chmod +x test_all.sh
# ./test_all.sh
#
# Requirements:
# - uv installed (https://docs.astral.sh/uv/getting-started/installation/)
# - Run from the Aetheel/ directory
# =============================================================================
set -euo pipefail
# ANSI escape sequences for colored terminal output (rendered via `echo -e`).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
BOLD='\033[1m'
# Running tallies, updated by pass()/fail()/skip() and reported at the end.
PASS=0
FAIL=0
SKIP=0
# pass/fail/skip: record one check result and print a colored status line.
# NOTE: counters are bumped with `VAR=$((VAR+1))` rather than `((VAR++))` —
# a post-increment of 0 makes (( )) return exit status 1, which aborts the
# entire script under `set -e` on the very first recorded result.
pass() { PASS=$((PASS + 1)); echo -e " ${GREEN}$1${NC}"; }
fail() { FAIL=$((FAIL + 1)); echo -e " ${RED}$1${NC}"; }
skip() { SKIP=$((SKIP + 1)); echo -e " ${YELLOW}⚠️ $1${NC}"; }
# section: print a bold cyan banner separating the numbered test phases.
section() { echo -e "\n${CYAN}${BOLD}━━━ $1 ━━━${NC}"; }
# =============================================================================
section "1. Environment Check"
# =============================================================================
# uv is mandatory — bail out immediately when it is missing.
if ! command -v uv &>/dev/null; then
  fail "uv not found — install from https://docs.astral.sh/uv/"
  echo " Cannot continue without uv."
  exit 1
fi
UV_VER=$(uv --version 2>&1 | head -1)
pass "uv found: $UV_VER"
# A Python interpreter is equally required.
if ! command -v python &>/dev/null; then
  fail "Python not found"
  exit 1
fi
PY_VER=$(python --version 2>&1)
pass "Python found: $PY_VER"
# Sanity-check the working directory: all later paths are relative to Aetheel/.
if [[ ! -f "pyproject.toml" ]]; then
  fail "pyproject.toml not found — run this script from the Aetheel/ directory"
  exit 1
fi
pass "Running from Aetheel directory"
# =============================================================================
section "2. Dependency Installation"
# =============================================================================
echo " Installing project + test dependencies..."
# Primary path: uv sync; on failure fall back to an editable pip install.
if uv sync --extra test --quiet 2>&1; then
  pass "uv sync --extra test succeeded"
else
  fail "uv sync failed"
  echo " Trying pip fallback..."
  uv pip install -e ".[test]" --quiet 2>&1 || fail "pip install also failed"
fi
# Verify that each critical package actually landed in the environment.
echo " Verifying installed packages..."
REQUIRED_PKGS=("pytest" "pytest-asyncio" "hypothesis" "click" "aiohttp" "apscheduler" "python-dotenv")
for pkg in "${REQUIRED_PKGS[@]}"; do
  # `&>/dev/null` already silences stdout AND stderr; the original's trailing
  # `2>&1` was redundant and has been dropped.
  if uv pip show "$pkg" &>/dev/null; then
    pass "$pkg installed"
  else
    fail "$pkg NOT installed"
  fi
done
# =============================================================================
section "3. Pytest — Unit Tests"
# =============================================================================
echo " Running all unit tests (excluding scheduler if apscheduler import fails)..."
echo ""
# Core suite runs first; scheduler tests run separately because they may
# legitimately fail (optional apscheduler dependency) without failing the run.
if ! uv run python -m pytest tests/ -v --tb=short --ignore=tests/test_scheduler.py 2>&1; then
  fail "Core test suite had failures"
else
  pass "Core test suite passed"
fi
echo ""
echo " Attempting scheduler tests..."
if ! uv run python -m pytest tests/test_scheduler.py -v --tb=short 2>&1; then
  skip "Scheduler tests failed (may need apscheduler)"
else
  pass "Scheduler tests passed"
fi
# =============================================================================
section "4. Smoke Tests — Config System"
# =============================================================================
echo " Testing config load..."
# Loads the config and asserts the claude tool defaults plus every expected
# top-level section (heartbeat, webchat, mcp, hooks, webhooks) are present.
if uv run python -c "
from config import load_config, AetheelConfig
cfg = load_config()
assert isinstance(cfg, AetheelConfig)
assert cfg.claude.no_tools == False, 'no_tools should default to False'
assert len(cfg.claude.allowed_tools) > 0, 'allowed_tools should have defaults'
assert hasattr(cfg, 'heartbeat'), 'Missing heartbeat config'
assert hasattr(cfg, 'webchat'), 'Missing webchat config'
assert hasattr(cfg, 'mcp'), 'Missing mcp config'
assert hasattr(cfg, 'hooks'), 'Missing hooks config'
assert hasattr(cfg, 'webhooks'), 'Missing webhooks config'
print('Config loaded OK — all sections present')
" 2>&1; then
  pass "Config system works"
else
  fail "Config system broken"
fi
# =============================================================================
section "5. Smoke Tests — System Prompt"
# =============================================================================
# Verifies build_aetheel_system_prompt() contains every newly added section.
if uv run python -c "
from agent.opencode_runtime import build_aetheel_system_prompt
prompt = build_aetheel_system_prompt()
sections = ['Your Tools', 'Self-Modification', 'Subagents & Teams']
for s in sections:
    assert s in prompt, f'Missing section: {s}'
    print(f' ✅ Section present: {s}')
print('System prompt OK')
" 2>&1; then
  pass "System prompt has all new sections"
else
  fail "System prompt missing sections"
fi
# =============================================================================
section "6. Smoke Tests — CLI"
# =============================================================================
# Top-level --help must work before probing individual commands/groups.
if uv run python cli.py --help >/dev/null 2>&1; then
  pass "CLI --help works"
else
  fail "CLI --help failed"
fi
# Check subcommands exist. Loop variables are quoted (ShellCheck SC2086) —
# the values here are safe literals, but unquoted expansions are fragile.
for cmd in start chat status doctor; do
  if uv run python cli.py "$cmd" --help >/dev/null 2>&1; then
    pass "CLI command '$cmd' exists"
  else
    fail "CLI command '$cmd' missing"
  fi
done
# Command groups (multi-command namespaces) are probed the same way.
for grp in cron config memory; do
  if uv run python cli.py "$grp" --help >/dev/null 2>&1; then
    pass "CLI group '$grp' exists"
  else
    fail "CLI group '$grp' missing"
  fi
done
# =============================================================================
section "7. Smoke Tests — Heartbeat Parser"
# =============================================================================
# Checks that natural-language schedule headers map to the expected cron
# expressions via HeartbeatRunner._parse_schedule_header.
if uv run python -c "
from heartbeat.heartbeat import HeartbeatRunner
tests = [
    ('Every 30 minutes', '*/30 * * * *'),
    ('Every hour', '0 * * * *'),
    ('Every 2 hours', '0 */2 * * *'),
    ('Every morning (9:00 AM)', '0 9 * * *'),
    ('Every evening (6:00 PM)', '0 18 * * *'),
]
for header, expected in tests:
    result = HeartbeatRunner._parse_schedule_header(header)
    assert result == expected, f'{header}: got {result}, expected {expected}'
    print(f' ✅ {header} → {result}')
print('Heartbeat parser OK')
" 2>&1; then
  pass "Heartbeat schedule parser works"
else
  fail "Heartbeat schedule parser broken"
fi
# =============================================================================
section "8. Smoke Tests — Hook System"
# =============================================================================
# Exercises HookManager registration/trigger, HookEvent key formatting, and
# message passing from hook callbacks back to the trigger caller.
if uv run python -c "
from hooks.hooks import HookManager, HookEvent
# Test programmatic hooks
mgr = HookManager()
received = []
mgr.register('gateway:startup', lambda e: received.append(e.event_key))
mgr.trigger(HookEvent(type='gateway', action='startup'))
assert received == ['gateway:startup'], f'Expected [gateway:startup], got {received}'
print(' ✅ Programmatic hook registration and trigger')
# Test event key
event = HookEvent(type='command', action='reload')
assert event.event_key == 'command:reload'
print(' ✅ Event key formatting')
# Test messages
mgr2 = HookManager()
mgr2.register('test:event', lambda e: e.messages.append('hello'))
event2 = HookEvent(type='test', action='event')
msgs = mgr2.trigger(event2)
assert 'hello' in msgs
print(' ✅ Hook message passing')
print('Hook system OK')
" 2>&1; then
  pass "Hook system works"
else
  fail "Hook system broken"
fi
# =============================================================================
section "9. Smoke Tests — Webhook Receiver"
# =============================================================================
# Instantiates WebhookReceiver with stub handlers (no server is started) and
# asserts the /hooks/wake route is registered on its internal router.
if uv run python -c "
from webhooks.receiver import WebhookReceiver, WebhookConfig
config = WebhookConfig(enabled=True, port=0, host='127.0.0.1', token='test')
receiver = WebhookReceiver(
    ai_handler_fn=lambda msg: 'ok',
    send_fn=lambda *a: None,
    config=config,
)
# Verify routes are registered
routes = [r.resource.canonical for r in receiver._app.router.routes() if hasattr(r, 'resource') and hasattr(r.resource, 'canonical')]
print(f' Routes: {routes}')
assert '/hooks/wake' in routes or any('/hooks/wake' in str(r) for r in receiver._app.router.routes())
print(' ✅ Webhook routes registered')
print('Webhook receiver OK')
" 2>&1; then
  pass "Webhook receiver initializes"
else
  fail "Webhook receiver broken"
fi
# =============================================================================
section "10. Smoke Tests — SubagentBus"
# =============================================================================
# Checks SubagentBus publish/subscribe delivery and that SubagentManager
# exposes a bus instance via its .bus property.
if uv run python -c "
from agent.subagent import SubagentBus, SubagentManager
# Test bus
bus = SubagentBus()
received = []
bus.subscribe('ch1', lambda msg, sender: received.append((msg, sender)))
bus.publish('ch1', 'hello', 'agent-1')
assert received == [('hello', 'agent-1')]
print(' ✅ SubagentBus pub/sub works')
# Test manager has bus
mgr = SubagentManager(runtime_factory=lambda: None, send_fn=lambda *a: None)
assert isinstance(mgr.bus, SubagentBus)
print(' ✅ SubagentManager.bus property works')
print('SubagentBus OK')
" 2>&1; then
  pass "SubagentBus works"
else
  fail "SubagentBus broken"
fi
# =============================================================================
section "11. Smoke Tests — MCP Config Writer"
# =============================================================================
# Writes MCP server config in both Claude (.mcp.json) and OpenCode
# (opencode.json) formats into a temp dir and checks the file contents;
# also verifies an empty MCPConfig produces no file at all.
if uv run python -c "
import json, os, tempfile
from config import MCPConfig, MCPServerConfig, write_mcp_config
with tempfile.TemporaryDirectory() as tmpdir:
    cfg = MCPConfig(servers={
        'test-server': MCPServerConfig(command='echo', args=['hello'], env={'KEY': 'val'})
    })
    # Claude format
    write_mcp_config(cfg, tmpdir, use_claude=True)
    with open(os.path.join(tmpdir, '.mcp.json')) as f:
        data = json.load(f)
    assert 'mcpServers' in data
    assert 'test-server' in data['mcpServers']
    print(' ✅ Claude .mcp.json written correctly')
    # OpenCode format
    write_mcp_config(cfg, tmpdir, use_claude=False)
    with open(os.path.join(tmpdir, 'opencode.json')) as f:
        data = json.load(f)
    assert 'mcp' in data
    print(' ✅ OpenCode opencode.json written correctly')
    # Empty config skips
    os.remove(os.path.join(tmpdir, '.mcp.json'))
    write_mcp_config(MCPConfig(), tmpdir, use_claude=True)
    assert not os.path.exists(os.path.join(tmpdir, '.mcp.json'))
    print(' ✅ Empty config skips file creation')
print('MCP config writer OK')
" 2>&1; then
  pass "MCP config writer works"
else
  fail "MCP config writer broken"
fi
# =============================================================================
section "12. Smoke Tests — WebChat Adapter"
# =============================================================================
# Verifies WebChatAdapter subclasses BaseAdapter, reports source_name
# 'webchat', and that the bundled chat.html exists.
# NOTE(review): in the embedded check below, `static_dir` is computed but never
# used — the assertion looks for 'static/chat.html' relative to the CWD rather
# than under adapters/. Confirm that path matches the actual repo layout.
if uv run python -c "
from adapters.webchat_adapter import WebChatAdapter
from adapters.base import BaseAdapter
adapter = WebChatAdapter(host='127.0.0.1', port=9999)
assert isinstance(adapter, BaseAdapter)
assert adapter.source_name == 'webchat'
print(' ✅ WebChatAdapter extends BaseAdapter')
print(' ✅ source_name is webchat')
import os
static_dir = os.path.join(os.path.dirname('adapters/webchat_adapter.py'), 'static')
html_path = os.path.join('static', 'chat.html')
assert os.path.isfile(html_path), f'chat.html not found at {html_path}'
print(' ✅ static/chat.html exists')
print('WebChat adapter OK')
" 2>&1; then
  pass "WebChat adapter works"
else
  fail "WebChat adapter broken"
fi
# =============================================================================
section "13. Smoke Tests — Claude Runtime Config"
# =============================================================================
# Asserts ClaudeCodeConfig defaults: tools enabled (no_tools False), a
# sizeable allowed_tools list, and team/task tools included.
if uv run python -c "
from agent.claude_runtime import ClaudeCodeConfig
cfg = ClaudeCodeConfig()
assert cfg.no_tools == False, f'no_tools should be False, got {cfg.no_tools}'
assert len(cfg.allowed_tools) > 10, f'Expected 15+ tools, got {len(cfg.allowed_tools)}'
assert 'Bash' in cfg.allowed_tools
assert 'WebSearch' in cfg.allowed_tools
assert 'TeamCreate' in cfg.allowed_tools
assert 'SendMessage' in cfg.allowed_tools
print(f' ✅ no_tools defaults to False')
print(f' ✅ {len(cfg.allowed_tools)} tools in default allowed_tools')
print(f' ✅ Team/Task tools included')
print('Claude runtime config OK')
" 2>&1; then
  pass "Claude runtime config correct"
else
  fail "Claude runtime config broken"
fi
# =============================================================================
section "14. Import Check — All Modules"
# =============================================================================
# Every first-party module must be importable on its own; a failure here
# usually means a missing dependency or a syntax error in that module.
MODULES=(
  "config"
  "adapters.base"
  "adapters.webchat_adapter"
  "agent.opencode_runtime"
  "agent.claude_runtime"
  "agent.subagent"
  "heartbeat.heartbeat"
  "hooks.hooks"
  "webhooks.receiver"
  "skills.skills"
  "cli"
)
for mod in "${MODULES[@]}"; do
  if ! uv run python -c "import $mod" 2>/dev/null; then
    fail "import $mod"
  else
    pass "import $mod"
  fi
done
# =============================================================================
section "RESULTS"
# =============================================================================
# Final scoreboard; the exit status mirrors whether any check failed so the
# script can gate CI pipelines.
TOTAL=$((PASS + FAIL + SKIP))
echo ""
echo -e "${BOLD}Total: $TOTAL checks${NC}"
echo -e " ${GREEN}Passed: $PASS${NC}"
echo -e " ${RED}Failed: $FAIL${NC}"
echo -e " ${YELLOW}Skipped: $SKIP${NC}"
echo ""
if (( FAIL > 0 )); then
  echo -e "${RED}${BOLD}$FAIL check(s) failed.${NC}"
  exit 1
fi
echo -e "${GREEN}${BOLD}All checks passed! 🎉${NC}"
exit 0