Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 26 additions & 0 deletions mini_agent/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,25 @@ def parse_args() -> argparse.Namespace:
# Subcommands
subparsers = parser.add_subparsers(dest="command", help="Available commands")

# gateway subcommand
gateway_parser = subparsers.add_parser("gateway", help="Start the API Gateway server")
gateway_parser.add_argument(
"--host",
default="0.0.0.0",
help="Host to bind (default: 0.0.0.0)",
)
gateway_parser.add_argument(
"--port",
type=int,
default=8000,
help="Port to bind (default: 8000)",
)
gateway_parser.add_argument(
"--reload",
action="store_true",
help="Enable auto-reload (dev mode)",
)

# log subcommand
log_parser = subparsers.add_parser("log", help="Show log directory or read log files")
log_parser.add_argument(
Expand Down Expand Up @@ -846,6 +865,13 @@ def main():
# Parse command line arguments
args = parse_args()

# Handle gateway subcommand
if args.command == "gateway":
from mini_agent.gateway.app import start_server
print(f"{Colors.BRIGHT_CYAN}🚀 Starting Mini-Agent Gateway on {args.host}:{args.port}...{Colors.RESET}")
start_server(host=args.host, port=args.port, reload=args.reload)
return

# Handle log subcommand
if args.command == "log":
if args.filename:
Expand Down
115 changes: 115 additions & 0 deletions mini_agent/gateway/app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
"""FastAPI application for the Gateway API."""

import asyncio
from typing import Dict, Any

from fastapi import FastAPI, HTTPException, BackgroundTasks
from mini_agent.gateway.schemas import (
CreateSessionRequest,
SessionResponse,
ChatRequest,
ChatResponse,
HistoryResponse,
)
from mini_agent.gateway.service import session_manager

# Module-level ASGI application instance. uvicorn imports it by the
# "mini_agent.gateway.app:app" string passed in start_server().
app = FastAPI(
    title="Mini-Agent Gateway",
    description="HTTP API Gateway for Mini-Agent",
    version="0.1.0",
)

@app.post("/sessions", response_model=SessionResponse)
async def create_session(request: CreateSessionRequest):
    """Create a new agent session.

    Delegates to the session manager, which wires up config, LLM client,
    tools and workspace. Returns the new session's metadata.

    Raises:
        HTTPException 500: configuration file missing, or any other
            failure while building the session.
    """
    try:
        session_info = await session_manager.create_session(
            workspace_dir=request.workspace_dir,
            system_prompt=request.system_prompt,
        )
        return SessionResponse(**session_info)
    except FileNotFoundError as e:
        # Raised by the service layer when the Mini-Agent config is absent.
        # Chain the cause so the original traceback survives into logs.
        raise HTTPException(status_code=500, detail=str(e)) from e
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to create session: {str(e)}") from e

@app.get("/sessions/{session_id}", response_model=SessionResponse)
async def get_session(session_id: str):
    """Return metadata for an existing session, or 404 if unknown."""
    session_info = session_manager.get_session_info(session_id)
    if not session_info:
        raise HTTPException(status_code=404, detail="Session not found")
    return SessionResponse(**session_info)

@app.delete("/sessions/{session_id}")
async def close_session(session_id: str):
    """Tear down a session; responds 404 when the id is unknown."""
    removed = session_manager.close_session(session_id)
    if not removed:
        raise HTTPException(status_code=404, detail="Session not found")
    return {"status": "closed"}

@app.post("/sessions/{session_id}/chat", response_model=ChatResponse)
async def chat(session_id: str, request: ChatRequest):
    """Send a message to the agent and run it to completion.

    agent.run() executes until task completion or max steps, handling
    tool calls automatically, so one request may span multiple LLM turns.

    Returns the final response text, the tool calls made during this run
    (in chronological order), and token usage.

    Raises:
        HTTPException 404: unknown session id.
        HTTPException 500: agent execution failed.
    """
    agent = session_manager.get_agent(session_id)
    if not agent:
        raise HTTPException(status_code=404, detail="Session not found")

    agent.add_user_message(request.message)

    try:
        response_content = await agent.run()

        usage = {"total_tokens": agent.api_total_tokens}

        # Collect only this run's tool calls: walk the history backwards
        # until we hit the user message that started the run. The backward
        # scan yields newest-first, so gather per-message groups and
        # flatten them in reverse to restore chronological order (the
        # original code returned them reverse-chronological).
        call_groups = []
        for msg in reversed(agent.messages):
            if msg.role == "user" and msg.content == request.message:
                break  # reached the start of this run
            if msg.role == "assistant" and msg.tool_calls:
                call_groups.append([
                    {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments,
                        "id": tc.id,
                    }
                    for tc in msg.tool_calls
                ])
        tool_calls = [call for group in reversed(call_groups) for call in group]

        return ChatResponse(
            response=response_content,
            tool_calls=tool_calls,
            usage=usage,
        )
    except Exception as e:
        # Chain the cause so the original traceback survives into logs.
        raise HTTPException(status_code=500, detail=f"Agent execution failed: {str(e)}") from e

@app.get("/sessions/{session_id}/history", response_model=HistoryResponse)
async def get_history(session_id: str):
    """Return the full message history of a session as plain dicts."""
    agent = session_manager.get_agent(session_id)
    if not agent:
        raise HTTPException(status_code=404, detail="Session not found")

    serialized = [
        {
            "role": msg.role,
            "content": msg.content,
            # Tool calls are pydantic models; dump them to dicts for JSON.
            "tool_calls": [tc.model_dump() for tc in msg.tool_calls] if msg.tool_calls else None,
            "name": msg.name,
            "tool_call_id": msg.tool_call_id,
        }
        for msg in agent.messages
    ]
    return HistoryResponse(messages=serialized)

def start_server(host: str = "0.0.0.0", port: int = 8000, reload: bool = False):
    """Run the gateway app under uvicorn.

    The app is given as an import string ("module:attr") rather than the
    object itself, which is what uvicorn's --reload mode requires so it
    can re-import the app on file changes.
    """
    # Local import: uvicorn is only needed when the server actually runs.
    import uvicorn

    uvicorn.run("mini_agent.gateway.app:app", host=host, port=port, reload=reload)
30 changes: 30 additions & 0 deletions mini_agent/gateway/schemas.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
"""Pydantic models for the Gateway API."""

from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field

class CreateSessionRequest(BaseModel):
    """Request body for POST /sessions."""

    # Optional workspace override; when omitted the service falls back to
    # the workspace configured in the agent config.
    workspace_dir: Optional[str] = None
    # Optional system-prompt override; when omitted the configured prompt
    # file (or a built-in default) is used.
    system_prompt: Optional[str] = None

class SessionResponse(BaseModel):
    """Session metadata returned by both create and get session endpoints."""

    # UUID4 string assigned by the session manager.
    session_id: str
    # Absolute path of the session's workspace directory.
    workspace_dir: str
    # ISO-8601 creation timestamp.
    created_at: str

class ChatRequest(BaseModel):
    """Request body for POST /sessions/{session_id}/chat."""

    # The user message to send to the agent.
    message: str
    # Reserved for future streaming support; currently unused by the server.
    stream: bool = False

class ChatResponse(BaseModel):
    """Response body for the chat endpoint."""

    # Final text produced by the agent run.
    response: str
    # Tool calls made during the run: dicts with "name", "arguments", "id".
    tool_calls: List[Dict[str, Any]] = Field(default_factory=list)
    # Token accounting, e.g. {"total_tokens": ...}; None when unavailable.
    usage: Optional[Dict[str, int]] = None

class HistoryResponse(BaseModel):
    """Response body for the history endpoint."""

    # Full message history as plain dicts (role, content, tool_calls,
    # name, tool_call_id).
    messages: List[Dict[str, Any]]
126 changes: 126 additions & 0 deletions mini_agent/gateway/service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
"""Service layer for the Gateway API."""

import asyncio
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, Optional

from mini_agent.agent import Agent
from mini_agent.cli import initialize_base_tools, add_workspace_tools
from mini_agent.config import Config
from mini_agent.llm import LLMClient
from mini_agent.schema import LLMProvider
from mini_agent.retry import RetryConfig as RetryConfigBase

class SessionManager:
    """In-process registry of live agent sessions, keyed by UUID.

    Holds one fully wired Agent per session plus a small metadata dict.
    Sessions exist only in memory and disappear when the process exits.
    """

    def __init__(self):
        # session_id -> Agent instance
        self._sessions: Dict[str, Agent] = {}
        # session_id -> metadata dict (session_id, workspace_dir, created_at)
        self._session_info: Dict[str, Dict] = {}

    async def create_session(self, workspace_dir: Optional[str] = None, system_prompt: Optional[str] = None) -> Dict:
        """Build an Agent from config and register it under a fresh UUID.

        Args:
            workspace_dir: Optional workspace path; defaults to the one in
                the agent config.
            system_prompt: Optional prompt text; defaults to the configured
                prompt file, else a built-in fallback.

        Returns:
            The session metadata dict (session_id, workspace_dir, created_at).

        Raises:
            FileNotFoundError: the Mini-Agent configuration file is missing.
        """
        session_id = str(uuid.uuid4())

        # Load configuration; fail fast when the app was never configured.
        config_path = Config.get_default_config_path()
        if not config_path.exists():
            raise FileNotFoundError("Configuration file not found. Please configure Mini-Agent first.")
        config = Config.from_yaml(config_path)

        # Resolve the workspace: caller override first, configured default
        # otherwise. NOTE(review): sessions without an explicit workspace
        # all share the configured default directory — confirm that is
        # acceptable for concurrent sessions.
        chosen_dir = workspace_dir if workspace_dir else config.agent.workspace_dir
        ws_path = Path(chosen_dir).expanduser().absolute()
        ws_path.mkdir(parents=True, exist_ok=True)

        # Build the LLM client, mirroring the retry settings from config.
        retry_settings = config.llm.retry
        retry_config = RetryConfigBase(
            enabled=retry_settings.enabled,
            max_retries=retry_settings.max_retries,
            initial_delay=retry_settings.initial_delay,
            max_delay=retry_settings.max_delay,
            exponential_base=retry_settings.exponential_base,
            retryable_exceptions=(Exception,),
        )
        is_anthropic = config.llm.provider.lower() == "anthropic"
        provider = LLMProvider.ANTHROPIC if is_anthropic else LLMProvider.OPENAI
        llm_client = LLMClient(
            api_key=config.llm.api_key,
            provider=provider,
            api_base=config.llm.api_base,
            model=config.llm.model,
            retry_config=retry_config if retry_settings.enabled else None,
        )

        # Tools: shared base tools plus workspace-bound tools.
        tools, skill_loader = await initialize_base_tools(config)
        add_workspace_tools(tools, config, ws_path)

        # System prompt: explicit override > configured prompt file > fallback.
        if system_prompt:
            prompt_text = system_prompt
        else:
            prompt_path = Config.find_config_file(config.agent.system_prompt_path)
            if prompt_path and prompt_path.exists():
                prompt_text = prompt_path.read_text(encoding="utf-8")
            else:
                prompt_text = "You are Mini-Agent, an intelligent assistant powered by MiniMax M2.5."

        # Fill the {SKILLS_METADATA} placeholder; empty string when there is
        # no skill loader or it yields no metadata.
        skills_metadata = skill_loader.get_skills_metadata_prompt() if skill_loader else ""
        prompt_text = prompt_text.replace("{SKILLS_METADATA}", skills_metadata or "")

        agent = Agent(
            llm_client=llm_client,
            system_prompt=prompt_text,
            tools=tools,
            max_steps=config.agent.max_steps,
            workspace_dir=str(ws_path),
        )
        self._sessions[session_id] = agent

        info = {
            "session_id": session_id,
            "workspace_dir": str(ws_path),
            "created_at": datetime.now().isoformat(),
        }
        self._session_info[session_id] = info
        return info

    def get_agent(self, session_id: str) -> Optional[Agent]:
        """Look up the Agent for *session_id*; None when unknown."""
        return self._sessions.get(session_id)

    def get_session_info(self, session_id: str) -> Optional[Dict]:
        """Look up the metadata dict for *session_id*; None when unknown."""
        return self._session_info.get(session_id)

    def close_session(self, session_id: str) -> bool:
        """Drop a session from the registry; True if it existed."""
        if session_id not in self._sessions:
            return False
        del self._sessions[session_id]
        del self._session_info[session_id]
        return True

# Module-level singleton shared by all gateway request handlers
# (imported by mini_agent.gateway.app).
session_manager = SessionManager()
2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ dependencies = [
"anthropic>=0.39.0",
"openai>=1.57.4",
"agent-client-protocol>=0.6.0",
"fastapi>=0.129.2",
"uvicorn>=0.38.0",
]

[project.scripts]
Expand Down
Loading