195 lines
4.2 KiB
Go
package agent
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/openai/openai-go"
|
|
oaioption "github.com/openai/openai-go/option"
|
|
|
|
"llm-agent/prd"
|
|
)
|
|
|
|
const (
	// baseURL is the local OpenAI-compatible endpoint (e.g. an Ollama /
	// Docker Model Runner instance) the agent talks to.
	baseURL = "http://127.0.0.1:12434/v1"
	// maxRetries is the number of attempts per task before it is marked failed.
	maxRetries = 3
	// maxTurns caps the model round-trips within a single task.
	maxTurns = 10
)
|
|
|
|
// systemPrompt is the (German-language) system message sent at the start of
// every task. Its content is runtime behavior: the model is instructed to emit
// the TOOL:* markers parsed by ExecuteTools and the TASK_COMPLETE marker
// detected by isTaskComplete, so the string must not be altered casually.
var systemPrompt = `Du bist ein Coding-Agent. Erledige den gegebenen Task.

TOOLS:
TOOL:READ_FILE:pfad
TOOL:WRITE_FILE:pfad
<<<
inhalt
>>>
TOOL:LIST_FILES:pfad

REGELN:
- Nutze relative Pfade
- Kein Markdown in Dateiinhalten
- Task erledigt: schreibe TASK_COMPLETE`
|
|
|
|
// AgentLoop drives task execution: it reads tasks from a PRD file and runs
// each pending task through a chat loop against a local OpenAI-compatible
// model, executing tool calls until the model signals completion.
type AgentLoop struct {
	client  *openai.Client // OpenAI-compatible API client (local endpoint)
	model   string         // model identifier passed on each request
	workDir string         // working directory the file tools operate in
	prdFile string         // path to the PRD task list
	log     *Logger        // progress/debug logger
}
|
|
|
|
func NewAgentLoop(model, workDir, prdFile string, verbose bool) *AgentLoop {
|
|
client := openai.NewClient(
|
|
oaioption.WithBaseURL(baseURL),
|
|
oaioption.WithAPIKey("ollama"),
|
|
)
|
|
return &AgentLoop{
|
|
client: &client,
|
|
model: model,
|
|
workDir: workDir,
|
|
prdFile: prdFile,
|
|
log: NewLogger(verbose),
|
|
}
|
|
}
|
|
|
|
func (a *AgentLoop) Run() error {
|
|
tasks, err := prd.ParseTasks(a.prdFile)
|
|
if err != nil {
|
|
return fmt.Errorf("PRD lesen fehlgeschlagen: %w", err)
|
|
}
|
|
|
|
pending := 0
|
|
for _, t := range tasks {
|
|
if !t.Completed {
|
|
pending++
|
|
}
|
|
}
|
|
a.log.Info("📋 %d Tasks gefunden, %d offen", len(tasks), pending)
|
|
|
|
for _, task := range tasks {
|
|
if task.Completed {
|
|
a.log.Info("✅ Überspringe (bereits erledigt): %s", task.Title)
|
|
continue
|
|
}
|
|
|
|
a.log.TaskStart(task.Title)
|
|
|
|
success := false
|
|
for attempt := 1; attempt <= maxRetries; attempt++ {
|
|
if attempt > 1 {
|
|
a.log.Info("🔁 Retry %d/%d...", attempt, maxRetries)
|
|
time.Sleep(time.Duration(attempt) * 2 * time.Second)
|
|
}
|
|
|
|
if err := a.runTask(task); err == nil {
|
|
success = true
|
|
break
|
|
} else {
|
|
a.log.Info("⚠️ Fehler: %v", err)
|
|
}
|
|
}
|
|
|
|
if success {
|
|
prd.MarkTaskComplete(a.prdFile, task.Title)
|
|
a.log.TaskDone(task.Title)
|
|
} else {
|
|
a.log.TaskFailed(task.Title, maxRetries)
|
|
}
|
|
}
|
|
|
|
a.log.Info("\n🎉 Alle Tasks abgearbeitet!")
|
|
return nil
|
|
}
|
|
|
|
func (a *AgentLoop) runTask(task prd.Task) error {
|
|
// Frischer Kontext pro Task
|
|
messages := []openai.ChatCompletionMessageParamUnion{
|
|
openai.SystemMessage(systemPrompt),
|
|
openai.UserMessage(fmt.Sprintf(
|
|
"Task: %s\nArbeitsverzeichnis: %s",
|
|
task.Title,
|
|
a.workDir,
|
|
)),
|
|
}
|
|
|
|
a.log.ChatMessage("system", systemPrompt)
|
|
a.log.ChatMessage("user", fmt.Sprintf(
|
|
"Task: %s\nArbeitsverzeichnis: %s",
|
|
task.Title,
|
|
a.workDir,
|
|
))
|
|
|
|
for turn := 0; turn < maxTurns; turn++ {
|
|
a.log.Turn(turn + 1)
|
|
|
|
// Token-Schätzung für Debugging
|
|
totalChars := 0
|
|
for _, m := range messages {
|
|
totalChars += len(fmt.Sprintf("%v", m))
|
|
}
|
|
start := time.Now()
|
|
a.log.Debug("MODEL REQUEST: model=%s totalChars=%d messages=%#v", a.model, totalChars, messages)
|
|
|
|
resp, err := a.client.Chat.Completions.New(
|
|
context.Background(),
|
|
openai.ChatCompletionNewParams{
|
|
Model: a.model,
|
|
Messages: messages,
|
|
},
|
|
)
|
|
elapsed := time.Since(start)
|
|
a.log.Debug("MODEL RESPONSE (elapsed=%s): %#v", elapsed, resp)
|
|
if err != nil {
|
|
return fmt.Errorf("API-Fehler (~%d Zeichen im Kontext): %w", totalChars, err)
|
|
}
|
|
|
|
response := resp.Choices[0].Message.Content
|
|
a.log.ChatMessage("assistant", response)
|
|
messages = append(messages, openai.AssistantMessage(response))
|
|
|
|
// Completion Detection
|
|
if isTaskComplete(response) {
|
|
return nil
|
|
}
|
|
|
|
// Tool Execution
|
|
toolOutput, hadTools := ExecuteTools(response, a.workDir)
|
|
if hadTools {
|
|
a.log.ChatMessage("tool", toolOutput)
|
|
messages = append(messages, openai.UserMessage(toolOutput))
|
|
continue
|
|
}
|
|
|
|
// Kein Tool, kein TASK_COMPLETE → anstupsen
|
|
nudge := "Fahre fort. Wenn der Task erledigt ist, schreibe TASK_COMPLETE."
|
|
a.log.ChatMessage("user", nudge)
|
|
messages = append(messages, openai.UserMessage(nudge))
|
|
}
|
|
|
|
return fmt.Errorf("maximale Turns (%d) erreicht ohne TASK_COMPLETE", maxTurns)
|
|
}
|
|
|
|
// isTaskComplete erkennt TASK_COMPLETE auch bei häufigen LLM-Tippfehlern
|
|
// isTaskComplete reports whether the model's response signals task completion,
// accepting the canonical TASK_COMPLETE marker as well as frequent LLM typos,
// all matched case-insensitively.
func isTaskComplete(response string) bool {
	// "TASK_COMPLET" is a prefix of both "TASK_COMPLETE" and "TASK_COMPLETED",
	// so a single substring scan over the uppercased text covers the canonical
	// marker, its truncated/extended forms, and the remaining typo variants.
	haystack := strings.ToUpper(response)
	markers := []string{
		"TASK_COMPLET",
		"TUTK_COMPLETE",
		"TASK_COMPETE",
		"TAKS_COMPLETE",
	}
	for _, marker := range markers {
		if strings.Contains(haystack, marker) {
			return true
		}
	}
	return false
}
|