auto-sync: task-router 2026-04-03_16:32
This commit is contained in:
commit
5bff900bc4
54
SKILL.md
Normal file
54
SKILL.md
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
---
|
||||||
|
name: dialogue-components-standardizer
|
||||||
|
description: A unified skill for standardizing the production and review of 6 dialogue interaction components. Core logic is fixed; optimizations are handled via branch files and scripts for repeatability. Enter skill only when components change dynamically.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Dialogue Components Standardizer
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This skill provides a modular structure for standardizing dialogue components:
|
||||||
|
- **Core (Fixed)**: Component types, workflows, and policies (defined here).
|
||||||
|
- **Branch Files**: `component_configs.yaml` for component-specific details (modify for optimizations).
|
||||||
|
- **Scripts**: Automated execution for repeatable tasks (e.g., generation, review).
|
||||||
|
- **Dynamic Entry**: Use skill for component changes; otherwise, rely on scripts.
|
||||||
|
|
||||||
|
## Core Structure
|
||||||
|
|
||||||
|
### Component Types (Fixed)
|
||||||
|
The 6 components are predefined:
|
||||||
|
1. dialogue_reading
|
||||||
|
2. dialogue_expression
|
||||||
|
3. dialogue_selective_reading
|
||||||
|
4. dialogue_selection
|
||||||
|
5. dialogue_sentence_building
|
||||||
|
6. dialogue_fill_in_the_blanks
|
||||||
|
|
||||||
|
### Workflows (Fixed)
|
||||||
|
- **Production**: Generate configs via script.
|
||||||
|
- **Review**: Validate via script.
|
||||||
|
- **Optimization**: Update `component_configs.yaml` for details.
|
||||||
|
|
||||||
|
## Branch Files
|
||||||
|
- `component_configs.yaml`: Contains format, config, and validation rules per component. Modify this for optimizations without altering core.
|
||||||
|
|
||||||
|
## Scripts
|
||||||
|
- `scripts/generate_component.py`: Generates component configs (repeatable).
|
||||||
|
- `scripts/review_component.py`: Reviews and validates configs (repeatable).
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
1. For standard production: Run scripts directly.
|
||||||
|
2. For component changes: Enter skill to update core or branch files.
|
||||||
|
3. Optimize details: Edit `component_configs.yaml`.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
- Generate: `python3 scripts/generate_component.py --type dialogue_reading --output config.json`
|
||||||
|
- Review: `python3 scripts/review_component.py --file config.json`
|
||||||
|
- "Rewrite this paragraph to sound more professional."
|
||||||
|
Route: `low_compute_model`
|
||||||
|
- "Design the data-cleaning approach, then process the CSV."
|
||||||
|
Route: `high_compute_model`, then `python_script`
|
||||||
|
|
||||||
|
## Resources
|
||||||
|
|
||||||
|
- Use [route_request.py](/Users/shasha/.codex/skills/task-router/scripts/route_request.py) as the first-pass classifier and execution planner.
|
||||||
4
agents/openai.yaml
Normal file
4
agents/openai.yaml
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
interface:
|
||||||
|
display_name: "Task Router"
|
||||||
|
short_description: "Route requests by execution cost"
|
||||||
|
default_prompt: "Use $task-router to decide whether a request should run a Python script, a high-compute model, or a low-compute model."
|
||||||
74
component_configs.yaml
Normal file
74
component_configs.yaml
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
# Component Configurations
|
||||||
|
# This file contains detailed configurations for each dialogue component.
|
||||||
|
# Modify this file for component-specific optimizations without changing the core skill.
|
||||||
|
|
||||||
|
components:
|
||||||
|
dialogue_reading:
|
||||||
|
required_fields: ["text", "language"]
|
||||||
|
format:
|
||||||
|
text: "string" # Required
|
||||||
|
audio: "optional_file" # Optional
|
||||||
|
language: "string" # Required
|
||||||
|
config:
|
||||||
|
duration: 30 # Expected reading time in seconds
|
||||||
|
scoring_threshold: 80 # Accuracy threshold (0-100)
|
||||||
|
validation_rules:
|
||||||
|
- "text must not be empty"
|
||||||
|
- "language must be supported"
|
||||||
|
|
||||||
|
dialogue_expression:
|
||||||
|
format:
|
||||||
|
text: "string_with_cues" # e.g., "[happy] Hello!"
|
||||||
|
media: "optional_file" # Video/image examples
|
||||||
|
config:
|
||||||
|
expression_types: ["happy", "sad", "angry"]
|
||||||
|
detection_threshold: 0.7
|
||||||
|
validation_rules:
|
||||||
|
- "expression cues must match types"
|
||||||
|
- "media file must be valid"
|
||||||
|
|
||||||
|
dialogue_selective_reading:
|
||||||
|
format:
|
||||||
|
full_dialogue: "string"
|
||||||
|
selectable_parts: "array_of_strings"
|
||||||
|
config:
|
||||||
|
min_selections: 1
|
||||||
|
max_selections: 5
|
||||||
|
feedback_enabled: true
|
||||||
|
validation_rules:
|
||||||
|
- "selectable_parts must be subset of full_dialogue"
|
||||||
|
- "selections count within limits"
|
||||||
|
|
||||||
|
dialogue_selection:
|
||||||
|
format:
|
||||||
|
prompt: "string"
|
||||||
|
options: "array_of_strings"
|
||||||
|
correct_answer: "integer" # Index of correct option
|
||||||
|
config:
|
||||||
|
multiple_choice: false
|
||||||
|
points_per_correct: 1
|
||||||
|
validation_rules:
|
||||||
|
- "correct_answer must be valid index"
|
||||||
|
- "options must have at least 2 items"
|
||||||
|
|
||||||
|
dialogue_sentence_building:
|
||||||
|
format:
|
||||||
|
words_phrases: "array_of_strings" # Shuffled components
|
||||||
|
target_sentence: "string"
|
||||||
|
config:
|
||||||
|
difficulty_level: "medium" # "easy", "medium", "hard"
|
||||||
|
hints_enabled: true
|
||||||
|
validation_rules:
|
||||||
|
- "words_phrases must form target_sentence"
|
||||||
|
- "difficulty must be valid"
|
||||||
|
|
||||||
|
dialogue_fill_in_the_blanks:
|
||||||
|
format:
|
||||||
|
template: "string_with_blanks" # e.g., "Hello [blank]!"
|
||||||
|
answers: "array_of_strings"
|
||||||
|
config:
|
||||||
|
case_sensitive: false
|
||||||
|
partial_credit: true
|
||||||
|
validation_rules:
|
||||||
|
- "blanks count must match answers"
|
||||||
|
- "template must have placeholders"
|
||||||
61
scripts/generate_component.py
Normal file
61
scripts/generate_component.py
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
generate_component.py
|
||||||
|
Script to generate standardized configurations for dialogue components.
|
||||||
|
Loads from component_configs.yaml and produces JSON output.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
CONFIG_FILE = os.path.join(os.path.dirname(__file__), '..', 'component_configs.yaml')
|
||||||
|
|
||||||
|
def load_configs():
    """Read component_configs.yaml and return its parsed contents."""
    with open(CONFIG_FILE, 'r', encoding='utf-8') as handle:
        raw = handle.read()
    return yaml.safe_load(raw)
|
||||||
|
|
||||||
|
def generate_component(component_type, **kwargs):
    """Build a standardized config dict for *component_type*.

    Looks the component up in component_configs.yaml, merges any keyword
    overrides on top of the YAML defaults (overrides win), and returns a
    JSON-serializable dict.

    Raises:
        ValueError: if *component_type* is not defined in the YAML file.
    """
    configs = load_configs()
    if component_type not in configs['components']:
        raise ValueError(f"Unknown component type: {component_type}")

    component_config = configs['components'][component_type]
    # Tolerate components that omit a 'config' or 'format' section instead
    # of raising a bare KeyError deep inside the merge.
    config = {**component_config.get('config', {}), **kwargs}

    output = {
        "component_type": component_type,
        "format": component_config.get('format', {}),
        "config": config,
        "status": "generated",
    }
    return output
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: generate a component config and write it as JSON."""
    parser = argparse.ArgumentParser(description="Generate dialogue component configuration.")
    parser.add_argument('--type', required=True, help="Component type (e.g., dialogue_reading)")
    parser.add_argument('--output', default='component.json', help="Output file")
    # Unknown arguments of the form --key=value are treated as config overrides.
    args, unknown = parser.parse_known_args()

    kwargs = {}
    for arg in unknown:
        if '=' in arg:
            key, value = arg.split('=', 1)
            # BUG FIX: strip leading dashes so "--duration=45" overrides the
            # "duration" default instead of adding a useless "--duration" key.
            kwargs[key.lstrip('-')] = value

    try:
        result = generate_component(args.type, **kwargs)
        with open(args.output, 'w', encoding='utf-8') as f:
            json.dump(result, f, indent=2, ensure_ascii=False)
        print(f"Generated component config saved to {args.output}")
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||||
67
scripts/review_component.py
Normal file
67
scripts/review_component.py
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
review_component.py
|
||||||
|
Script to review and validate dialogue component configurations.
|
||||||
|
Loads from component_configs.yaml and checks against rules.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
CONFIG_FILE = os.path.join(os.path.dirname(__file__), '..', 'component_configs.yaml')
|
||||||
|
|
||||||
|
def load_configs():
    """Parse component_configs.yaml and return the resulting mapping."""
    with open(CONFIG_FILE, encoding='utf-8') as src:
        return yaml.safe_load(src)
|
||||||
|
|
||||||
|
def validate_component(component_data):
    """Validate a generated component dict against component_configs.yaml.

    Returns a dict with keys: component_type, issues (list of strings), and
    status — "approved" when no issues were found, "needs_fix" otherwise,
    or "error" for an unknown component type.
    """
    component_type = component_data.get('component_type')
    configs = load_configs()
    if component_type not in configs['components']:
        return {"status": "error", "issues": [f"Unknown component type: {component_type}"]}

    component_config = configs['components'][component_type]
    issues = []
    config_data = component_data.get('config', {})

    def _is_empty(value):
        # Only None and empty strings/containers count as empty.
        # BUG FIX: the old truthiness test (`not value`) rejected legitimate
        # falsy config values such as 0 or False (e.g. case_sensitive: false).
        return value is None or (hasattr(value, '__len__') and len(value) == 0)

    # Check required fields; fall back to every field in the format spec
    # when the component does not declare an explicit required_fields list.
    required_fields = component_config.get('required_fields', list(component_config['format'].keys()))
    for field in required_fields:
        if field not in config_data or _is_empty(config_data[field]):
            issues.append(f"Missing or empty required field: {field}")

    # Check validation rules (simplified keyword matching on the rule text).
    for rule in component_config.get('validation_rules', []):
        if "must not be empty" in rule:
            for field in component_config['format']:
                if field in config_data and _is_empty(config_data[field]):
                    issues.append(f"Field {field} {rule}")

    status = "approved" if not issues else "needs_fix"
    return {"component_type": component_type, "issues": issues, "status": status}
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: review a component JSON file and print the verdict."""
    parser = argparse.ArgumentParser(description="Review dialogue component configuration.")
    parser.add_argument('--file', required=True, help="Component JSON file to review")
    parser.add_argument('--strict', action='store_true', help="Fail on any issues")

    opts = parser.parse_args()

    try:
        with open(opts.file, 'r', encoding='utf-8') as src:
            payload = json.load(src)

        verdict = validate_component(payload)
        print(json.dumps(verdict, indent=2, ensure_ascii=False))

        # In strict mode any unresolved issue is a hard failure.
        if opts.strict and verdict['status'] != 'approved':
            sys.exit(1)
    except Exception as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||||
314
scripts/route_request.py
Executable file
314
scripts/route_request.py
Executable file
@ -0,0 +1,314 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Dict, List
|
||||||
|
|
||||||
|
|
||||||
|
ROUTES = ("python_script", "high_compute_model", "low_compute_model")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class RouteScore:
    """Score assigned to one candidate route, with the reasons behind it."""

    name: str  # route identifier; one of ROUTES
    score: int  # heuristic score; higher means a stronger match
    reasons: List[str]  # human-readable explanations for the score
|
||||||
|
|
||||||
|
|
||||||
|
def build_execution_plan(route: str, text: str, confidence: float) -> Dict[str, object]:
    """Return the execution-plan payload for *route*.

    The plan embeds a whitespace-collapsed preview of *text*, capped at
    140 characters. Unknown routes fall back to the low-compute plan.
    NOTE(review): *confidence* is currently unused here — kept for
    interface stability with callers.
    """
    snippet = " ".join(text.strip().split())
    if len(snippet) > 140:
        snippet = snippet[:137] + "..."

    plans: Dict[str, Dict[str, object]] = {
        "python_script": {
            "execution_type": "run_python",
            "goal": "Handle the request with deterministic code execution.",
            "immediate_action": "Inspect the files/data involved, then write or run a focused Python script.",
            "codex_instruction": "Execute the task with Python first. Use the model only to design the script or explain the result.",
            "artifacts_to_produce": [
                "a Python script or one-off Python command",
                "structured output or generated files",
                "a concise summary of what was processed",
            ],
            "escalate_if": [
                "the script needs significant algorithm or architecture design",
                "requirements are ambiguous before coding can start",
            ],
            "request_preview": snippet,
        },
        "high_compute_model": {
            "execution_type": "run_high_compute_model",
            "goal": "Handle the request with deeper reasoning before taking action.",
            "immediate_action": "Use a stronger model to analyze the task, resolve ambiguity, and produce the answer or plan.",
            "codex_instruction": "Give the task to a stronger model path first. If execution is later needed, convert the resulting plan into code or commands.",
            "artifacts_to_produce": [
                "a detailed answer, design, or plan",
                "explicit tradeoffs, assumptions, or decision criteria",
            ],
            "escalate_if": [
                "the task becomes procedural after planning",
                "the answer requires file processing or repeatable transformations",
            ],
            "request_preview": snippet,
        },
    }

    fallback_plan: Dict[str, object] = {
        "execution_type": "run_low_compute_model",
        "goal": "Handle the request with the cheapest viable language-model pass.",
        "immediate_action": "Use a lightweight model path for a fast first answer.",
        "codex_instruction": "Start with a cheaper/faster model. Escalate only if the output is weak, incomplete, or the task expands.",
        "artifacts_to_produce": [
            "a short answer or rewrite",
            "minimal reasoning with quick turnaround",
        ],
        "escalate_if": [
            "the request turns out to be ambiguous",
            "the first pass fails quality checks",
            "multiple retries would cost more than escalating once",
        ],
        "request_preview": snippet,
    }

    return plans.get(route, fallback_plan)
|
||||||
|
|
||||||
|
|
||||||
|
def normalize(text: str) -> str:
    """Lowercase *text*, trim it, and collapse whitespace runs to single spaces."""
    collapsed = re.sub(r"\s+", " ", text.strip().lower())
    return collapsed
|
||||||
|
|
||||||
|
|
||||||
|
def keyword_hits(text: str, keywords: List[str]) -> List[str]:
    """Return the keywords that occur as substrings of *text*, in input order."""
    return [keyword for keyword in keywords if keyword in text]
|
||||||
|
|
||||||
|
|
||||||
|
def score_python_route(text: str) -> RouteScore:
    """Score how strongly *text* suggests deterministic Python/script work."""
    notes: List[str] = []
    total = 0

    signals = keyword_hits(
        text,
        [
            "python", "script", "csv", "json", "yaml", "xml", "excel",
            "spreadsheet", "parse", "extract", "transform", "convert",
            "rename", "batch", "directory", "folder", "file", "files",
            "dataset", "log", "logs", "calculate", "count", "sort",
            "filter", "regex", "scrape",
        ],
    )
    if signals:
        # Base of 4 plus up to 6 bonus points for additional matches.
        total += 4 + min(len(signals), 6)
        notes.append(
            "deterministic data/file-processing signals: "
            + ", ".join(signals[:6])
        )

    automation_markers = ("automate", "repeatedly", "pipeline", "generate report")
    if any(marker in text for marker in automation_markers):
        total += 3
        notes.append("request looks repetitive or automation-friendly")

    precision_markers = ("exact", "precise", "reproducible", "structured output")
    if any(marker in text for marker in precision_markers):
        total += 2
        notes.append("request favors reproducible execution over free-form reasoning")

    return RouteScore("python_script", total, notes)
|
||||||
|
|
||||||
|
|
||||||
|
def score_high_route(text: str) -> RouteScore:
    """Score how strongly *text* calls for deeper, open-ended reasoning."""
    notes: List[str] = []
    total = 0

    signals = keyword_hits(
        text,
        [
            "analyze", "analysis", "design", "architect", "strategy",
            "compare", "tradeoff", "debug", "root cause", "plan",
            "complex", "hard", "unclear", "ambiguous", "research",
            "brainstorm", "proposal", "spec",
        ],
    )
    if signals:
        # Base of 4 plus up to 6 bonus points for additional matches.
        total += 4 + min(len(signals), 6)
        notes.append(
            "open-ended reasoning signals: " + ", ".join(signals[:6])
        )

    care_markers = ("step by step", "carefully", "deeply", "thoroughly", "rigorous")
    if any(marker in text for marker in care_markers):
        total += 3
        notes.append("user explicitly asks for deeper or more careful reasoning")

    # Long requests tend to need more context and reasoning depth.
    if len(text.split()) > 80:
        total += 2
        notes.append("request is long enough to suggest higher-context reasoning")

    return RouteScore("high_compute_model", total, notes)
|
||||||
|
|
||||||
|
|
||||||
|
def score_low_route(text: str) -> RouteScore:
    """Score how strongly *text* looks like a cheap, lightweight language task."""
    notes: List[str] = []
    total = 0

    signals = keyword_hits(
        text,
        [
            "rewrite", "rephrase", "translate", "summarize", "summary",
            "classify", "tag", "format", "clean up", "fix grammar",
            "short answer", "quick", "simple",
        ],
    )
    if signals:
        # Base of 4 plus up to 5 bonus points for additional matches.
        total += 4 + min(len(signals), 5)
        notes.append(
            "lightweight language-task signals: " + ", ".join(signals[:6])
        )

    if len(text.split()) <= 25:
        total += 2
        notes.append("request is short and likely cheap to answer")

    cost_markers = ("cheap", "fast", "brief")
    if any(marker in text for marker in cost_markers):
        total += 2
        notes.append("user is optimizing for speed or lower cost")

    return RouteScore("low_compute_model", total, notes)
|
||||||
|
|
||||||
|
|
||||||
|
def choose_route(text: str) -> Dict[str, object]:
    """Classify *text* into one of ROUTES and return the routing decision.

    The result dict contains: route, confidence (0.25–0.95), reasons,
    per-route scores, recommended_next_action, and an execution_plan.
    """
    normalized = normalize(text)
    if not normalized:
        # Empty/whitespace-only input: cheapest route, low fixed confidence.
        execution_plan = build_execution_plan("low_compute_model", text, 0.25)
        return {
            "route": "low_compute_model",
            "confidence": 0.25,
            "reasons": ["empty request defaults to the lowest-cost model"],
            "scores": {route: 0 for route in ROUTES},
            "execution_plan": execution_plan,
        }

    scored_routes = [
        score_python_route(normalized),
        score_high_route(normalized),
        score_low_route(normalized),
    ]
    # NOTE: list.sort is stable, so score ties resolve in the list order
    # above (python > high > low). Do not replace with max().
    scored_routes.sort(key=lambda item: item.score, reverse=True)

    winner = scored_routes[0]
    runner_up = scored_routes[1]

    if winner.score == 0:
        # No heuristic fired at all: default to the stronger model.
        winner = RouteScore(
            "high_compute_model",
            1,
            ["fallback to the stronger model because the task is not obviously deterministic or trivial"],
        )
        runner_up = RouteScore("low_compute_model", 0, [])

    # Confidence grows with the winner's margin over the runner-up,
    # clamped to [0.55, 0.95].
    margin = max(winner.score - runner_up.score, 0)
    confidence = min(0.55 + 0.1 * margin, 0.95)

    recommended_next_action = {
        "python_script": "Prefer executing or writing a Python script first, then use a model only for glue logic or explanation.",
        "high_compute_model": "Prefer a stronger model for planning, ambiguity resolution, or multi-step reasoning.",
        "low_compute_model": "Prefer a cheaper/faster model for the first pass and escalate only if it struggles.",
    }[winner.name]
    confidence = round(confidence, 2)
    execution_plan = build_execution_plan(winner.name, text, confidence)

    return {
        "route": winner.name,
        "confidence": confidence,
        "reasons": winner.reasons,
        "scores": {item.name: item.score for item in scored_routes},
        "recommended_next_action": recommended_next_action,
        "execution_plan": execution_plan,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """CLI entry point: classify one request and print the routing decision."""
    parser = argparse.ArgumentParser(
        description="Route a request to python_script, high_compute_model, or low_compute_model."
    )
    parser.add_argument("--text", help="Request text to classify. If omitted, read from stdin.")
    parser.add_argument(
        "--pretty",
        action="store_true",
        help="Pretty-print JSON output.",
    )
    parser.add_argument(
        "--summary",
        action="store_true",
        help="Print a compact human-readable routing summary instead of JSON.",
    )
    opts = parser.parse_args()

    # Fall back to stdin only when --text was not supplied at all.
    request_text = sys.stdin.read() if opts.text is None else opts.text
    decision = choose_route(request_text)

    if opts.summary:
        print("Route: " + str(decision["route"]))
        print("Why: " + "; ".join(decision["reasons"][:2]))
        print("Next step: " + decision["execution_plan"]["immediate_action"])
        return 0

    indent = 2 if opts.pretty else None
    print(json.dumps(decision, indent=indent, ensure_ascii=True))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||||
15
test_config.json
Normal file
15
test_config.json
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"component_type": "dialogue_reading",
|
||||||
|
"format": {
|
||||||
|
"text": "string",
|
||||||
|
"audio": "optional_file",
|
||||||
|
"language": "string"
|
||||||
|
},
|
||||||
|
"config": {
|
||||||
|
"duration": 30,
|
||||||
|
"scoring_threshold": 80,
|
||||||
|
"text": "Hello, how are you?",
|
||||||
|
"language": "en"
|
||||||
|
},
|
||||||
|
"status": "generated"
|
||||||
|
}
|
||||||
Loading…
Reference in New Issue
Block a user