// Package agent implements an autonomous coding-agent loop that drives an
// OpenAI-compatible chat model with tool calling until each PRD task is done.
package agent

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/openai/openai-go"
	oaioption "github.com/openai/openai-go/option"

	"llm-agent/prd"
)

const (
	// baseURL points at a local OpenAI-compatible endpoint (e.g. Ollama / Docker Model Runner).
	baseURL    = "http://127.0.0.1:12434/v1"
	maxRetries = 3  // attempts per task before giving up on it
	maxTurns   = 10 // model round-trips per task before aborting
)

// systemPrompt instructs the model (in German) to complete the given task with
// the provided tools, call task_complete when finished, and use relative paths.
var systemPrompt = `Du bist ein autonomer Coding-Agent. Erledige den gegebenen Task vollständig mit den bereitgestellten Tools. Rufe task_complete auf sobald der Task erledigt ist. Nutze ausschließlich relative Pfade.`

// AgentLoop drives the model/tool conversation for every task in a PRD file.
type AgentLoop struct {
	client  *openai.Client
	model   string
	workDir string // working directory handed to the tool executor
	prdFile string // path to the PRD file containing the task list
	log     *Logger
}

// NewAgentLoop builds an AgentLoop talking to the local endpoint.
// The API key is a placeholder; local OpenAI-compatible servers ignore it.
func NewAgentLoop(model, workDir, prdFile string, verbose bool) *AgentLoop {
	client := openai.NewClient(
		oaioption.WithBaseURL(baseURL),
		oaioption.WithAPIKey("ollama"),
	)
	return &AgentLoop{
		client:  &client,
		model:   model,
		workDir: workDir,
		prdFile: prdFile,
		log:     NewLogger(verbose),
	}
}

// Run processes all tasks from the PRD file sequentially. Completed tasks are
// skipped; each pending task is retried up to maxRetries times with linear
// backoff. A task failure is logged (including the last error) and the loop
// moves on, so Run only returns an error when the PRD itself cannot be read.
func (a *AgentLoop) Run() error {
	tasks, err := prd.ParseTasks(a.prdFile)
	if err != nil {
		return fmt.Errorf("PRD lesen fehlgeschlagen: %w", err)
	}

	pending := 0
	for _, t := range tasks {
		if !t.Completed {
			pending++
		}
	}
	a.log.Info("📋 %d Tasks gefunden, %d offen", len(tasks), pending)

	for _, task := range tasks {
		if task.Completed {
			a.log.Info("✅ Überspringe (bereits erledigt): %s", task.Title)
			continue
		}

		a.log.TaskStart(task.Title)
		success := false
		var lastErr error

		for attempt := 1; attempt <= maxRetries; attempt++ {
			if attempt > 1 {
				a.log.Info("🔁 Retry %d/%d...", attempt, maxRetries)
				// Linear backoff: 4s, 6s, ... before each retry.
				time.Sleep(time.Duration(attempt) * 2 * time.Second)
			}
			if err := a.runTask(task); err == nil {
				success = true
				break
			} else {
				lastErr = err
				a.log.Info("⚠️ Fehler: %v", err)
			}
		}

		if success {
			prd.MarkTaskComplete(a.prdFile, task.Title)
			a.log.TaskDone(task.Title)
		} else {
			a.log.TaskFailed(task.Title, maxRetries)
			// Surface the last error instead of silently dropping it.
			if lastErr != nil {
				a.log.Info("   Letzter Fehler: %v", lastErr)
			}
		}
	}

	a.log.Info("\n🎉 Alle Tasks abgearbeitet!")
	return nil
}

// runTask runs the model/tool conversation for a single task with a fresh
// message history. It succeeds when the model calls task_complete and fails
// after maxTurns round-trips or on an API error.
func (a *AgentLoop) runTask(task prd.Task) error {
	executor := NewToolExecutor(a.workDir)

	userMsg := fmt.Sprintf(
		"Task: %s\nArbeitsverzeichnis: %s", task.Title, a.workDir,
	)

	// Fresh context per task.
	messages := []openai.ChatCompletionMessageParamUnion{
		openai.SystemMessage(systemPrompt),
		openai.UserMessage(userMsg),
	}
	a.log.ChatMessage("system", systemPrompt)
	a.log.ChatMessage("user", userMsg)

	for turn := 0; turn < maxTurns; turn++ {
		a.log.Turn(turn + 1)

		// Rough request size, for debugging only.
		totalChars := 0
		for _, m := range messages {
			totalChars += len(fmt.Sprintf("%v", m))
		}

		start := time.Now()
		a.log.Debug("MODEL REQUEST: model=%s ~%d Zeichen\n%s",
			a.model, totalChars, formatMessages(messages))

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
		resp, err := a.client.Chat.Completions.New(
			ctx,
			openai.ChatCompletionNewParams{
				Model:    a.model,
				Messages: messages,
				Tools:    Tools, // ← tool calling
			},
		)
		cancel()
		elapsed := time.Since(start)

		if resp != nil && len(resp.Choices) > 0 {
			a.log.Debug("MODEL RESPONSE\n%s", formatResponse(resp, elapsed))
		}
		if err != nil {
			return fmt.Errorf("API-Fehler (~%d Zeichen): %w", totalChars, err)
		}
		// Guard: indexing an empty Choices slice would panic.
		if len(resp.Choices) == 0 {
			return fmt.Errorf("API-Antwort enthält keine Choices")
		}

		choice := resp.Choices[0]
		// Append the assistant reply to the history.
		messages = append(messages, choice.Message.ToParam())

		// No tool call → the model answered with plain text only.
		// Nudge it back towards the tools and try another turn.
		if len(choice.Message.ToolCalls) == 0 {
			a.log.ChatMessage("assistant", choice.Message.Content)
			nudge := "Nutze die bereitgestellten Tools. Rufe task_complete auf wenn du fertig bist."
			a.log.ChatMessage("user", nudge)
			messages = append(messages, openai.UserMessage(nudge))
			continue
		}

		// Execute the requested tool calls.
		for _, toolCall := range choice.Message.ToolCalls {
			a.log.Info(" 🔧 %s(%s)", toolCall.Function.Name,
				truncate(toolCall.Function.Arguments, 80))

			result, done := executor.Execute(toolCall)
			a.log.ChatMessage("tool", fmt.Sprintf("%s → %s", toolCall.Function.Name, result))

			// Feed the tool result back to the model.
			messages = append(messages, openai.ToolMessage(result, toolCall.ID))

			if done {
				return nil // task_complete called → success
			}
		}
	}

	return fmt.Errorf("maximale Turns (%d) erreicht", maxTurns)
}

// ─── Helpers ─────────────────────────────────────────────

// truncate shortens s to at most max runes, appending "..." when cut.
// Cutting on rune boundaries keeps the preview valid UTF-8 even for
// multi-byte input (German text, emoji).
func truncate(s string, max int) string {
	if len(s) <= max {
		return s
	}
	r := []rune(s)
	if len(r) <= max {
		return s
	}
	return string(r[:max]) + "..."
}

// formatMessages renders a compact one-line-per-message preview of the
// conversation history for debug logging.
func formatMessages(messages []openai.ChatCompletionMessageParamUnion) string {
	var sb strings.Builder
	for i, m := range messages {
		var role, content string
		switch {
		case m.OfSystem != nil:
			role = "system"
			content = m.OfSystem.Content.OfString.Value
		case m.OfUser != nil:
			role = "user"
			content = m.OfUser.Content.OfString.Value
		case m.OfAssistant != nil:
			role = "assistant"
			content = m.OfAssistant.Content.OfString.Value
		case m.OfTool != nil:
			role = "tool"
			content = m.OfTool.Content.OfString.Value
		default:
			role = "other"
		}
		preview := strings.ReplaceAll(truncate(content, 120), "\n", "↵")
		sb.WriteString(fmt.Sprintf(" [%d] %-10s : %s\n", i, role, preview))
	}
	return sb.String()
}

// formatResponse renders a multi-line summary of a chat completion (id, model,
// timing, token usage, and either tool calls or the text content) for debug
// logging. Callers must ensure resp has at least one choice.
func formatResponse(resp *openai.ChatCompletion, elapsed time.Duration) string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf(" ID : %s\n", resp.ID))
	sb.WriteString(fmt.Sprintf(" Modell : %s\n", resp.Model))
	sb.WriteString(fmt.Sprintf(" Elapsed : %s\n", elapsed.Round(time.Millisecond)))
	sb.WriteString(fmt.Sprintf(" Finish-Reason : %s\n", resp.Choices[0].FinishReason))
	sb.WriteString(fmt.Sprintf(" Tokens : prompt=%d completion=%d total=%d\n",
		resp.Usage.PromptTokens,
		resp.Usage.CompletionTokens,
		resp.Usage.TotalTokens,
	))

	// Show tool calls when present, otherwise the plain-text content.
	if len(resp.Choices[0].Message.ToolCalls) > 0 {
		sb.WriteString(" Tool-Calls :\n")
		for _, tc := range resp.Choices[0].Message.ToolCalls {
			sb.WriteString(fmt.Sprintf(" → %s(%s)\n",
				tc.Function.Name,
				truncate(tc.Function.Arguments, 100),
			))
		}
	} else {
		content := resp.Choices[0].Message.Content
		sb.WriteString(" Content :\n")
		for _, line := range strings.Split(content, "\n") {
			sb.WriteString(fmt.Sprintf(" %s\n", line))
		}
	}
	return sb.String()
}