Umstellung auf Model Tools
This commit is contained in:
169
agent/loop.go
169
agent/loop.go
@@ -18,16 +18,10 @@ const (
|
||||
maxTurns = 10
|
||||
)
|
||||
|
||||
var systemPrompt = `Du bist ein Coding-Agent und programmierst Go.
|
||||
Erledige deine Aufgabe mit folgenden Tools:
|
||||
TOOL:READ_FILE:pfad
|
||||
TOOL:WRITE_FILE:pfad:<<<inhalt>>>
|
||||
TOOL:LIST_FILES:pfad
|
||||
|
||||
REGELN:
|
||||
- Nutze relative Pfade
|
||||
- Kein Markdown in Dateiinhalten
|
||||
- Wenn Task erledigt: schreibe nur TASK_COMPLETE`
|
||||
var systemPrompt = `Du bist ein autonomer Coding-Agent.
|
||||
Erledige den gegebenen Task vollständig mit den bereitgestellten Tools.
|
||||
Rufe task_complete auf sobald der Task erledigt ist.
|
||||
Nutze ausschließlich relative Pfade.`
|
||||
|
||||
type AgentLoop struct {
|
||||
client *openai.Client
|
||||
@@ -74,6 +68,7 @@ func (a *AgentLoop) Run() error {
|
||||
a.log.TaskStart(task.Title)
|
||||
|
||||
success := false
|
||||
var lastErr error
|
||||
for attempt := 1; attempt <= maxRetries; attempt++ {
|
||||
if attempt > 1 {
|
||||
a.log.Info("🔁 Retry %d/%d...", attempt, maxRetries)
|
||||
@@ -84,6 +79,7 @@ func (a *AgentLoop) Run() error {
|
||||
success = true
|
||||
break
|
||||
} else {
|
||||
lastErr = err
|
||||
a.log.Info("⚠️ Fehler: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -93,6 +89,7 @@ func (a *AgentLoop) Run() error {
|
||||
a.log.TaskDone(task.Title)
|
||||
} else {
|
||||
a.log.TaskFailed(task.Title, maxRetries)
|
||||
_ = lastErr
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,137 +98,116 @@ func (a *AgentLoop) Run() error {
|
||||
}
|
||||
|
||||
func (a *AgentLoop) runTask(task prd.Task) error {
|
||||
executor := NewToolExecutor(a.workDir)
|
||||
|
||||
// Frischer Kontext pro Task
|
||||
messages := []openai.ChatCompletionMessageParamUnion{
|
||||
openai.SystemMessage(systemPrompt),
|
||||
openai.UserMessage(fmt.Sprintf(
|
||||
"Task: %s\nArbeitsverzeichnis: %s",
|
||||
task.Title,
|
||||
a.workDir,
|
||||
task.Title, a.workDir,
|
||||
)),
|
||||
}
|
||||
|
||||
a.log.ChatMessage("system", systemPrompt)
|
||||
a.log.ChatMessage("user", fmt.Sprintf(
|
||||
"Task: %s\nArbeitsverzeichnis: %s",
|
||||
task.Title,
|
||||
a.workDir,
|
||||
task.Title, a.workDir,
|
||||
))
|
||||
|
||||
for turn := 0; turn < maxTurns; turn++ {
|
||||
a.log.Turn(turn + 1)
|
||||
|
||||
// Token-Schätzung für Debugging
|
||||
totalChars := 0
|
||||
for _, m := range messages {
|
||||
totalChars += len(fmt.Sprintf("%v", m))
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
a.log.Debug("MODEL REQUEST: model=%s ~%d Zeichen\n%s", a.model, totalChars, formatMessages(messages))
|
||||
a.log.Debug("MODEL REQUEST: model=%s ~%d Zeichen\n%s",
|
||||
a.model, totalChars, formatMessages(messages))
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
resp, err := a.client.Chat.Completions.New(
|
||||
context.Background(),
|
||||
ctx,
|
||||
openai.ChatCompletionNewParams{
|
||||
Model: a.model,
|
||||
Messages: messages,
|
||||
Tools: Tools, // ← Tool Calling
|
||||
},
|
||||
)
|
||||
cancel()
|
||||
|
||||
elapsed := time.Since(start)
|
||||
if resp != nil && len(resp.Choices) > 0 {
|
||||
a.log.Debug("MODEL RESPONSE\n%s", formatResponse(resp, elapsed))
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("API-Fehler (~%d Zeichen im Kontext): %w", totalChars, err)
|
||||
return fmt.Errorf("API-Fehler (~%d Zeichen): %w", totalChars, err)
|
||||
}
|
||||
|
||||
response := resp.Choices[0].Message.Content
|
||||
a.log.ChatMessage("assistant", response)
|
||||
messages = append(messages, openai.AssistantMessage(response))
|
||||
choice := resp.Choices[0]
|
||||
|
||||
// Completion Detection
|
||||
if isTaskComplete(response) {
|
||||
if turn == 0 {
|
||||
// LLM hat sofort TASK_COMPLETE ohne Tool-Call → nichts wurde getan
|
||||
nudge := "Du hast die Datei noch nicht erstellt! Nutze zuerst WRITE_FILE, dann schreibe TASK_COMPLETE."
|
||||
a.log.ChatMessage("user", nudge)
|
||||
messages = append(messages, openai.UserMessage(nudge))
|
||||
continue // nächster Turn
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Antwort zur History hinzufügen
|
||||
messages = append(messages, choice.Message.ToParam())
|
||||
|
||||
// Tool Execution
|
||||
toolOutput, hadTools := ExecuteTools(response, a.workDir)
|
||||
if hadTools {
|
||||
a.log.ChatMessage("tool", toolOutput)
|
||||
messages = append(messages, openai.UserMessage(toolOutput))
|
||||
// Kein Tool-Call → LLM hat nur Text geantwortet
|
||||
if len(choice.Message.ToolCalls) == 0 {
|
||||
a.log.ChatMessage("assistant", choice.Message.Content)
|
||||
nudge := "Nutze die bereitgestellten Tools. Rufe task_complete auf wenn du fertig bist."
|
||||
a.log.ChatMessage("user", nudge)
|
||||
messages = append(messages, openai.UserMessage(nudge))
|
||||
continue
|
||||
}
|
||||
|
||||
// Kein Tool, kein TASK_COMPLETE → anstupsen
|
||||
nudge := "Fahre fort. Wenn der Task erledigt ist, schreibe TASK_COMPLETE."
|
||||
a.log.ChatMessage("user", nudge)
|
||||
messages = append(messages, openai.UserMessage(nudge))
|
||||
}
|
||||
// Tool-Calls ausführen
|
||||
for _, toolCall := range choice.Message.ToolCalls {
|
||||
a.log.Info(" 🔧 %s(%s)", toolCall.Function.Name,
|
||||
truncate(toolCall.Function.Arguments, 80))
|
||||
|
||||
return fmt.Errorf("maximale Turns (%d) erreicht ohne TASK_COMPLETE", maxTurns)
|
||||
}
|
||||
result, done := executor.Execute(toolCall)
|
||||
a.log.ChatMessage("tool",
|
||||
fmt.Sprintf("%s → %s", toolCall.Function.Name, result))
|
||||
|
||||
// isTaskComplete erkennt TASK_COMPLETE auch bei häufigen LLM-Tippfehlern
|
||||
func isTaskComplete(response string) bool {
|
||||
if strings.Contains(response, "TASK_COMPLETE") {
|
||||
return true
|
||||
}
|
||||
typos := []string{
|
||||
"TUTK_COMPLETE",
|
||||
"TASK_COMPLET",
|
||||
"TASK_COMPLETED",
|
||||
"TASK_COMPETE",
|
||||
"TAKS_COMPLETE",
|
||||
}
|
||||
upper := strings.ToUpper(response)
|
||||
for _, t := range typos {
|
||||
if strings.Contains(upper, t) {
|
||||
return true
|
||||
// Tool-Ergebnis zurück ans LLM
|
||||
messages = append(messages, openai.ToolMessage(result, toolCall.ID))
|
||||
|
||||
if done {
|
||||
return nil // task_complete aufgerufen → Erfolg
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
|
||||
return fmt.Errorf("maximale Turns (%d) erreicht", maxTurns)
|
||||
}
|
||||
|
||||
// ─── Hilfsfunktionen ─────────────────────────────────────
|
||||
|
||||
// truncate shortens s to at most max bytes and appends "..." when it cuts.
// The cut position is moved back to the nearest UTF-8 rune boundary so the
// result is always valid UTF-8 — log previews here carry umlauts and emoji,
// and a raw s[:max] byte slice could split a multi-byte sequence in half.
func truncate(s string, max int) string {
	if len(s) <= max {
		return s
	}
	// range over a string yields rune start offsets only, so the largest
	// offset <= max is a safe boundary. For pure-ASCII input this is
	// exactly max, matching the plain byte-slice behavior.
	cut := 0
	for i := range s {
		if i > max {
			break
		}
		cut = i
	}
	return s[:cut] + "..."
}
|
||||
|
||||
// formatMessages gibt die Chat-History lesbar aus
|
||||
func formatMessages(messages []openai.ChatCompletionMessageParamUnion) string {
|
||||
var sb strings.Builder
|
||||
for i, m := range messages {
|
||||
var role, content string
|
||||
|
||||
switch {
|
||||
case m.OfSystem != nil:
|
||||
role = "system"
|
||||
if len(m.OfSystem.Content.OfString.Value) > 0 {
|
||||
content = m.OfSystem.Content.OfString.Value
|
||||
}
|
||||
content = m.OfSystem.Content.OfString.Value
|
||||
case m.OfUser != nil:
|
||||
role = "user"
|
||||
if len(m.OfUser.Content.OfString.Value) > 0 {
|
||||
content = m.OfUser.Content.OfString.Value
|
||||
}
|
||||
content = m.OfUser.Content.OfString.Value
|
||||
case m.OfAssistant != nil:
|
||||
role = "assistant"
|
||||
if len(m.OfAssistant.Content.OfString.Value) > 0 {
|
||||
content = m.OfAssistant.Content.OfString.Value
|
||||
}
|
||||
content = m.OfAssistant.Content.OfString.Value
|
||||
default:
|
||||
role = "unknown"
|
||||
content = fmt.Sprintf("%+v", m)
|
||||
role = "other"
|
||||
}
|
||||
|
||||
// Inhalt auf 120 Zeichen kürzen für Übersicht
|
||||
preview := content
|
||||
if len(preview) > 120 {
|
||||
preview = preview[:120] + "..."
|
||||
}
|
||||
// Zeilenumbrüche für einzeilige Darstellung ersetzen
|
||||
preview = strings.ReplaceAll(preview, "\n", "↵")
|
||||
|
||||
preview := strings.ReplaceAll(truncate(content, 120), "\n", "↵")
|
||||
sb.WriteString(fmt.Sprintf(" [%d] %-10s : %s\n", i, role, preview))
|
||||
}
|
||||
return sb.String()
|
||||
@@ -239,7 +215,6 @@ func formatMessages(messages []openai.ChatCompletionMessageParamUnion) string {
|
||||
|
||||
func formatResponse(resp *openai.ChatCompletion, elapsed time.Duration) string {
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString(fmt.Sprintf(" ID : %s\n", resp.ID))
|
||||
sb.WriteString(fmt.Sprintf(" Modell : %s\n", resp.Model))
|
||||
sb.WriteString(fmt.Sprintf(" Elapsed : %s\n", elapsed.Round(time.Millisecond)))
|
||||
@@ -250,17 +225,21 @@ func formatResponse(resp *openai.ChatCompletion, elapsed time.Duration) string {
|
||||
resp.Usage.TotalTokens,
|
||||
))
|
||||
|
||||
// Tokens/Sekunde aus den Timing-Daten (Ollama-spezifisch)
|
||||
if timings, ok := resp.JSON.ExtraFields["timings"]; ok {
|
||||
sb.WriteString(fmt.Sprintf(" Timings : %s\n", timings.Raw()))
|
||||
// Tool-Calls anzeigen
|
||||
if len(resp.Choices[0].Message.ToolCalls) > 0 {
|
||||
sb.WriteString(" Tool-Calls :\n")
|
||||
for _, tc := range resp.Choices[0].Message.ToolCalls {
|
||||
sb.WriteString(fmt.Sprintf(" → %s(%s)\n",
|
||||
tc.Function.Name,
|
||||
truncate(tc.Function.Arguments, 100),
|
||||
))
|
||||
}
|
||||
} else {
|
||||
content := resp.Choices[0].Message.Content
|
||||
sb.WriteString(" Content :\n")
|
||||
for _, line := range strings.Split(content, "\n") {
|
||||
sb.WriteString(fmt.Sprintf(" %s\n", line))
|
||||
}
|
||||
}
|
||||
|
||||
sb.WriteString(fmt.Sprintf(" Content :\n"))
|
||||
// Inhalt eingerückt und vollständig ausgeben
|
||||
content := resp.Choices[0].Message.Content
|
||||
for _, line := range strings.Split(content, "\n") {
|
||||
sb.WriteString(fmt.Sprintf(" %s\n", line))
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user