Initial Phase A intelligence core
This commit is contained in:
commit
94eae8ceba
11 changed files with 4261 additions and 0 deletions
99
syncpatch/tool_graph.py
Normal file
99
syncpatch/tool_graph.py
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, asdict
|
||||
from typing import Any
|
||||
|
||||
|
||||
@dataclass
class ToolNode:
    """One node of the tool graph: static metadata about a single tool.

    All fields are plain strings so the node serialises trivially; the
    classification fields (effect/risk/latency/groundedness/cost) are
    coarse buckets assigned by the ``classify_*`` helpers in this module.
    """

    name: str            # registry key of the tool
    kind: str            # 'action' | 'evidence' | 'final'
    description: str     # human-readable description from the registry
    input_schema: str    # coarse label of expected input, e.g. 'user_query'
    output_schema: str   # coarse label of produced output
    effect_type: str     # side-effect class derived from kind
    risk: str            # 'low' | 'medium' | 'high'
    latency_class: str   # 'fast' | 'medium' | 'slow'
    groundedness: str    # 'weak' | 'medium' | 'strong'
    cost_class: str      # 'low' | 'medium' | 'high'

    def to_dict(self) -> dict[str, Any]:
        """Return this node as a plain ``dict`` (field name -> value)."""
        return asdict(self)
|
||||
|
||||
|
||||
# Static lookup tables keyed by tool kind ('action' | 'evidence' | 'final').
# Side-effect class of each tool kind.
KIND_TO_EFFECT = dict(
    action='state_change',
    evidence='evidence_only',
    final='answer_only',
)

# Default risk bucket of each tool kind.
KIND_TO_RISK = dict(
    action='high',
    evidence='low',
    final='medium',
)

# Coarse output-schema label of each tool kind.
KIND_TO_OUTPUT = dict(
    action='action_result',
    evidence='evidence_blob',
    final='final_answer',
)
|
||||
|
||||
|
||||
def classify_latency(name: str, kind: str) -> str:
    """Bucket a tool into a latency class: 'fast', 'medium', or 'slow'.

    A few known research/analysis tools are always 'slow' regardless of
    kind; otherwise the class follows the tool kind, defaulting to
    'medium' for unknown kinds.
    """
    slow_tools = {'url_research', 'web_research', 'ops_deep_analyze'}
    if name in slow_tools:
        return 'slow'
    by_kind = {'action': 'medium', 'evidence': 'fast'}
    return by_kind.get(kind, 'medium')
|
||||
|
||||
|
||||
|
||||
def classify_cost(name: str, kind: str) -> str:
    """Bucket a tool into a cost class: 'low', 'medium', or 'high'.

    Research/analysis tools are always 'high'; other action tools are
    'medium'; everything else (evidence, final, unknown kinds) is 'low'.
    """
    expensive = {'web_research', 'url_research', 'ops_deep_analyze'}
    if name in expensive:
        return 'high'
    return 'medium' if kind == 'action' else 'low'
|
||||
|
||||
|
||||
|
||||
def classify_groundedness(name: str, kind: str) -> str:
    """Bucket a tool by how grounded its output is: 'weak'/'medium'/'strong'.

    Named diagnostic/lookup tools and all evidence-kind tools are
    'strong'; open-ended generative tools are 'weak'; anything else is
    'medium'.
    """
    strongly_grounded = {
        'setup_lookup',
        'memory_profile',
        'web_root_cause',
        'user_service_access_diagnose',
        'light_status',
        'emby_user_provision',
    }
    weakly_grounded = {
        'general_answer',
        'personal_assist',
        'expert_write',
        'expert_strategy',
    }
    if name in strongly_grounded or kind == 'evidence':
        return 'strong'
    if name in weakly_grounded:
        return 'weak'
    return 'medium'
|
||||
|
||||
|
||||
|
||||
def infer_input_schema(name: str) -> str:
    """Return the coarse input-schema label for a tool.

    Two tools take an extra hint alongside the user query; everything
    else takes the bare user query.
    """
    special_cases = {
        'memory_profile': 'user_query+memory_mode',
        'setup_lookup': 'user_query+service_hint',
    }
    return special_cases.get(name, 'user_query')
|
||||
|
||||
|
||||
|
||||
def build_tool_graph(tool_registry: dict[str, dict[str, Any]]) -> dict[str, ToolNode]:
    """Build a name -> ToolNode map from a raw tool registry.

    Each registry entry is classified along the static axes (effect,
    risk, latency, groundedness, cost) via the module lookup tables and
    ``classify_*`` helpers. A missing or falsy registry (``None``, ``{}``)
    yields an empty graph; a missing/falsy ``kind`` defaults to 'final'.
    """
    registry = tool_registry or {}
    nodes: dict[str, ToolNode] = {}
    for tool_name, meta in registry.items():
        tool_kind = str(meta.get('kind') or 'final')
        nodes[tool_name] = ToolNode(
            name=tool_name,
            kind=tool_kind,
            description=str(meta.get('description') or ''),
            input_schema=infer_input_schema(tool_name),
            output_schema=KIND_TO_OUTPUT.get(tool_kind, 'opaque'),
            effect_type=KIND_TO_EFFECT.get(tool_kind, 'answer_only'),
            risk=KIND_TO_RISK.get(tool_kind, 'medium'),
            latency_class=classify_latency(tool_name, tool_kind),
            groundedness=classify_groundedness(tool_name, tool_kind),
            cost_class=classify_cost(tool_name, tool_kind),
        )
    return nodes
|
||||
Loading…
Add table
Add a link
Reference in a new issue