major agent refactoring: wiki knowledge base, no RAG, no Qdrant, no Ollama

This commit is contained in:
2026-04-21 21:03:24 -04:00
parent 7e4b54d701
commit 44a1688657
80 changed files with 2699 additions and 4267 deletions

View File

@@ -28,7 +28,7 @@ async def activate_strategy(
Activate a strategy for live or paper forward trading.
Args:
strategy_name: Display name as saved via python_write("strategy", ...)
strategy_name: Display name as saved via PythonWrite("strategy", ...)
feeds: List of feed dicts: [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
allocation: Capital allocated in quote currency (e.g. 5000.0 USDT)
paper: True = paper/simulated fills (default); False = live (not yet implemented)

View File

@@ -30,7 +30,7 @@ async def backtest_strategy(
Load a saved strategy, fetch OHLC+ data for each feed, and run a backtest.
Args:
strategy_name: Display name as saved via python_write("strategy", ...)
strategy_name: Display name as saved via PythonWrite("strategy", ...)
feeds: List of feed dicts, e.g. [{"symbol": "BTC/USDT.BINANCE", "period_seconds": 3600}]
from_time: Backtest start (Unix timestamp or date string)
to_time: Backtest end (Unix timestamp or date string)

View File

@@ -139,7 +139,7 @@ async def evaluate_indicator(
"error": (
f"Custom indicator '{pandas_ta_name}' not found after registering "
"custom indicators. Make sure the indicator was created with "
"python_write(category='indicator', name='...') and that its "
"PythonWrite(category='indicator', name='...') and that its "
"implementation.py defines a function matching the sanitized name."
)
}))]

View File

@@ -18,6 +18,7 @@ After write/edit operations, a category-specific test harness runs to validate
the code and capture errors/output for agent feedback.
"""
import base64
import json
import logging
import re
@@ -62,7 +63,6 @@ class BaseMetadata:
"""Base metadata for all categories."""
name: str # Display name (can have special chars)
description: str # LLM-generated description
details: str = "" # Full markdown description with enough detail to reproduce the code
conda_packages: list[str] = None # Additional conda packages required
def __post_init__(self):
@@ -165,7 +165,12 @@ class IndicatorMetadata(BaseMetadata):
@dataclass
class ResearchMetadata(BaseMetadata):
"""Metadata for research scripts."""
pass
output: dict = None # Output files: {"analysis": "analysis.md", "images": ["img1.png", ...]}
def __post_init__(self):
super().__post_init__()
if self.output is None:
self.output = {}
# Metadata class registry
@@ -546,11 +551,19 @@ class CategoryFileManager:
except Exception as e:
return {"success": False, "error": f"Failed to write implementation: {e}"}
# Build metadata
# Write details.md (stored separately from metadata)
details_path = item_dir / "details.md"
try:
details_path.write_text(details or "")
log.info(f"Wrote details: {details_path}")
except Exception as e:
return {"success": False, "error": f"Failed to write details: {e}"}
# Build metadata (details stored separately in details.md)
meta_dict = metadata or {}
meta_dict["name"] = name
meta_dict["description"] = description
meta_dict["details"] = details
meta_dict.pop("details", None) # ensure details not stored in metadata
# For indicators, store the canonical pandas_ta_name so the reverse
# mapping (ta_name → directory) is reliable regardless of name casing.
@@ -583,7 +596,7 @@ class CategoryFileManager:
if validation["success"]:
if cat == Category.RESEARCH:
log.info(f"Auto-executing research script: {name}")
result["execution"] = await self.execute_research(name)
result["execution"] = await self.execute_research(name, commit=False)
elif cat == Category.INDICATOR:
log.info(f"Auto-executing indicator test: {name}")
result["execution"] = await self._execute_indicator(item_dir)
@@ -652,6 +665,18 @@ class CategoryFileManager:
except Exception as e:
return {"success": False, "error": f"Failed to read existing metadata: {e}"}
# Load existing details from details.md; migrate from metadata.json if needed
details_path = item_dir / "details.md"
existing_details = ""
if details_path.exists():
existing_details = details_path.read_text()
elif existing_meta.get("details"):
existing_details = existing_meta.pop("details")
try:
details_path.write_text(existing_details)
except Exception:
pass # migration failure is non-fatal
# Apply string-replacement patches if provided
if patches is not None:
if not impl_path.exists():
@@ -682,7 +707,7 @@ class CategoryFileManager:
# Apply text-replacement patches to details field if provided
if detail_patches is not None:
current_details = existing_meta.get("details", "")
current_details = existing_details
for i, patch in enumerate(detail_patches):
old = patch.get("old_string", "")
new = patch.get("new_string", "")
@@ -693,14 +718,22 @@ class CategoryFileManager:
current_details = current_details.replace(old, new, 1)
details = current_details
# Update metadata
# Write details.md if details was updated
if details is not None:
try:
details_path.write_text(details)
log.info(f"Updated details.md: {details_path}")
except Exception as e:
return {"success": False, "error": f"Failed to write details: {e}"}
# Update metadata (details always stored in details.md, never in metadata)
updated_meta = existing_meta.copy()
updated_meta.pop("details", None)
if description is not None:
updated_meta["description"] = description
if details is not None:
updated_meta["details"] = details
if metadata:
updated_meta.update(metadata)
meta_updates = {k: v for k, v in metadata.items() if k != "details"}
updated_meta.update(meta_updates)
# Validate and write metadata
try:
@@ -730,7 +763,7 @@ class CategoryFileManager:
if code is not None and result["success"]:
if cat == Category.RESEARCH:
log.info(f"Auto-executing research script after edit: {name}")
result["execution"] = await self.execute_research(name)
result["execution"] = await self.execute_research(name, commit=False)
elif cat == Category.INDICATOR:
log.info(f"Auto-executing indicator test after edit: {name}")
result["execution"] = await self._execute_indicator(item_dir)
@@ -778,9 +811,24 @@ class CategoryFileManager:
if meta_path.exists():
metadata = json.loads(meta_path.read_text())
# Read details from details.md; migrate from metadata.json if needed
details_path = item_dir / "details.md"
details = ""
if details_path.exists():
details = details_path.read_text()
elif metadata.get("details"):
details = metadata.pop("details")
try:
details_path.write_text(details)
meta_path.write_text(json.dumps(metadata, indent=2))
log.info(f"Migrated details to details.md for {item_dir.name}")
except Exception:
pass # migration failure is non-fatal
return {
"exists": True,
"code": code,
"details": details,
"metadata": metadata,
}
except Exception as e:
@@ -972,12 +1020,14 @@ class CategoryFileManager:
"images": data["images"],
}
async def execute_research(self, name: str) -> dict[str, Any]:
async def execute_research(self, name: str, commit: bool = True) -> dict[str, Any]:
"""
Execute a research script and return structured content with images.
Args:
name: Display name of the research script
commit: Whether to commit output files to git (default True; set False
when called from write()/edit() which commit everything together)
Returns:
dict with:
@@ -1040,10 +1090,139 @@ class CategoryFileManager:
log.error(f"execute_research '{name}': script failed with no output")
return {"error": "Research script execution failed"}
# Persist output to output/ subdir
if content:
output_dir = item_dir / "output"
output_dir.mkdir(exist_ok=True)
output_meta: dict[str, Any] = {}
if data.get("stdout"):
analysis_path = output_dir / "analysis.md"
try:
analysis_path.write_text(data["stdout"])
output_meta["analysis"] = "analysis.md"
except Exception as e:
log.warning(f"execute_research '{name}': failed to write analysis.md: {e}")
image_files = []
for i, img in enumerate(data.get("images", []), 1):
img_filename = f"img{i}.png"
img_path = output_dir / img_filename
try:
img_path.write_bytes(base64.b64decode(img["data"]))
image_files.append(img_filename)
except Exception as e:
log.warning(f"execute_research '{name}': failed to write {img_filename}: {e}")
if image_files:
output_meta["images"] = image_files
# Update metadata.json with output section
meta_path = item_dir / "metadata.json"
if meta_path.exists():
try:
meta = json.loads(meta_path.read_text())
meta["output"] = output_meta
meta_path.write_text(json.dumps(meta, indent=2))
except Exception as e:
log.warning(f"execute_research '{name}': failed to update metadata output: {e}")
# Commit output files
if commit:
try:
await self.git.commit_async(f"output(research): {name}")
except Exception as e:
log.warning(f"execute_research '{name}': git commit failed: {e}")
log.info(f"execute_research '{name}': returning {len(content)} content items")
return {"content": content}
def read_output(
    self,
    category: str,
    name: str,
    files: Optional[list[str]] = None
) -> dict[str, Any]:
    """
    Read output files for a category item.

    Args:
        category: Category name
        name: Display name of the item
        files: Specific filenames under output/ to return (e.g. ["analysis.md", "img1.png"]).
            If omitted, returns all output files listed in metadata, falling back to
            every regular file in the output directory.

    Returns:
        dict with:
            - content: list of TextContent and ImageContent objects (MCP format)
            - files_returned: list of filenames actually returned
            - output_dir: str path to output directory
            - error: str (if any)
    """
    try:
        cat = Category(category)
    except ValueError:
        return {"error": f"Invalid category '{category}'"}

    item_dir = get_category_path(self.src_dir, cat, name)
    if not item_dir.exists():
        return {"error": f"Item '{name}' not found in '{category}'"}

    output_dir = item_dir / "output"
    if not output_dir.exists():
        return {"error": f"No output directory for '{name}' — run the script first"}

    # Determine which files to return: explicit caller list takes precedence,
    # then the metadata "output" section, then everything in output/.
    if files is None:
        meta_path = item_dir / "metadata.json"
        if meta_path.exists():
            try:
                meta = json.loads(meta_path.read_text())
                output_meta = meta.get("output") or {}
                files = []
                if output_meta.get("analysis"):
                    files.append(output_meta["analysis"])
                files.extend(output_meta.get("images") or [])
            except Exception:
                files = []
        if not files:
            # Fallback: return all files in output dir
            files = [p.name for p in sorted(output_dir.iterdir()) if p.is_file()]

    if not files:
        return {"error": f"No output files found for '{name}'"}

    # Imported lazily so the module loads even when MCP types are never needed.
    from mcp.types import TextContent, ImageContent

    resolved_output_dir = output_dir.resolve()
    content = []
    files_returned = []
    for filename in files:
        file_path = output_dir / filename
        # SECURITY: filenames may come from external tool input — refuse any
        # path that resolves outside the output/ directory (e.g. "../../x").
        if resolved_output_dir not in file_path.resolve().parents:
            log.warning(f"Skipping output file outside output dir: {filename}")
            continue
        if not file_path.exists():
            log.warning(f"Output file not found: {file_path}")
            continue
        suffix = file_path.suffix.lower()
        if suffix in ('.md', '.txt'):
            text = file_path.read_text()
            content.append(TextContent(type="text", text=text))
            files_returned.append(filename)
        elif suffix in ('.png', '.jpg', '.jpeg'):
            data = base64.b64encode(file_path.read_bytes()).decode()
            mime = "image/png" if suffix == '.png' else "image/jpeg"
            content.append(ImageContent(type="image", data=data, mimeType=mime))
            files_returned.append(filename)
        else:
            # BUG FIX: original message interpolated nothing ("(unknown)");
            # include the actual filename so the log is actionable.
            log.warning(f"Unsupported output file type: {filename}")

    return {
        "content": content,
        "files_returned": files_returned,
        "output_dir": str(output_dir),
    }
async def delete(self, category: str, name: str) -> dict[str, Any]:
"""
Delete a category script directory and commit the removal to git.

View File

@@ -7,9 +7,9 @@ in the user container. These stores sync with the gateway and web client.
Storage location: {DATA_DIR}/workspace/{store_name}.json
Available tools:
- workspace_read(store_name) -> dict
- workspace_write(store_name, data) -> None
- workspace_patch(store_name, patch) -> dict
- WorkspaceRead(store_name) -> dict
- WorkspaceWrite(store_name, data) -> None
- WorkspacePatch(store_name, patch) -> dict
Future: Path-based triggers for container-side reactions to state changes.
"""
@@ -322,14 +322,14 @@ def register_workspace_tools(server):
@server.call_tool()
async def handle_tool_call(name: str, arguments: dict) -> Any:
"""Handle workspace tool calls."""
if name == "workspace_read":
if name == "WorkspaceRead":
return store.read(arguments.get("store_name", ""))
elif name == "workspace_write":
elif name == "WorkspaceWrite":
return store.write(
arguments.get("store_name", ""),
arguments.get("data")
)
elif name == "workspace_patch":
elif name == "WorkspacePatch":
return store.patch(
arguments.get("store_name", ""),
arguments.get("patch", [])
@@ -342,7 +342,7 @@ def register_workspace_tools(server):
"""List available workspace tools."""
return [
{
"name": "workspace_read",
"name": "WorkspaceRead",
"description": "Read a workspace store from persistent storage",
"inputSchema": {
"type": "object",
@@ -356,7 +356,7 @@ def register_workspace_tools(server):
}
},
{
"name": "workspace_write",
"name": "WorkspaceWrite",
"description": "Write a workspace store to persistent storage",
"inputSchema": {
"type": "object",
@@ -373,7 +373,7 @@ def register_workspace_tools(server):
}
},
{
"name": "workspace_patch",
"name": "WorkspacePatch",
"description": "Apply JSON patch operations to a workspace store",
"inputSchema": {
"type": "object",