From 797657c56b33aec070df7d8375c6d9ca0c82fc90 Mon Sep 17 00:00:00 2001 From: "Christoph K." Date: Wed, 25 Feb 2026 07:30:06 +0100 Subject: [PATCH] init code --- PRD.md | 7 +++ agent/loop.go | 159 +++++++++++++++++++++++++++++++++++++++++++++++++ agent/tools.go | 88 +++++++++++++++++++++++++++ go.mod | 11 ++++ go.sum | 12 ++++ main.go | 67 +++++++++++++++++++++ prd/parser.go | 59 ++++++++++++++++++ 7 files changed, 403 insertions(+) create mode 100644 PRD.md create mode 100644 agent/loop.go create mode 100644 agent/tools.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 main.go create mode 100644 prd/parser.go diff --git a/PRD.md b/PRD.md new file mode 100644 index 0000000..8e8e742 --- /dev/null +++ b/PRD.md @@ -0,0 +1,7 @@ +# Mein Projekt + +## Tasks + +- [ ] Erstelle eine Datei hello.go mit einem Hello World Programm +- [ ] Erstelle eine Datei README.md mit einer kurzen Projektbeschreibung +- [ ] Projektstruktur anlegen diff --git a/agent/loop.go b/agent/loop.go new file mode 100644 index 0000000..4d0d155 --- /dev/null +++ b/agent/loop.go @@ -0,0 +1,159 @@ +package agent + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/openai/openai-go" + oaioption "github.com/openai/openai-go/option" + + "llm-agent/prd" +) + +const ( + baseURL = "http://127.0.0.1:12434/v1" + maxRetries = 3 + maxTurns = 10 // Sicherheitslimit pro Task +) + +var systemPrompt = `Du bist ein autonomer Coding-Agent. Du bekommst einen Task und erledigst ihn vollständig. + +Du hast folgende Tools zur Verfügung: +- TOOL:READ_FILE:pfad → Datei lesen +- TOOL:WRITE_FILE:pfad:inhalt → Datei schreiben +- TOOL:LIST_FILES:pfad → Verzeichnis auflisten + +Regeln: +1. Analysiere den Task zuerst +2. Nutze die Tools um Dateien zu lesen/schreiben +3. Wenn der Task vollständig erledigt ist, schreibe am Ende: TASK_COMPLETE +4. 
Bei Fehlern beschreibe das Problem klar` + +type AgentLoop struct { + client *openai.Client + model string + workDir string + prdFile string +} + +func NewAgentLoop(model, workDir, prdFile string) *AgentLoop { + client := openai.NewClient( + oaioption.WithBaseURL(baseURL), + oaioption.WithAPIKey("ollama"), + ) + return &AgentLoop{ + client: &client, + model: model, + workDir: workDir, + prdFile: prdFile, + } +} + +func (a *AgentLoop) Run() error { + tasks, err := prd.ParseTasks(a.prdFile) + if err != nil { + return fmt.Errorf("PRD lesen fehlgeschlagen: %w", err) + } + + pending := 0 + for _, t := range tasks { + if !t.Completed { + pending++ + } + } + fmt.Printf("📋 %d Tasks gefunden, %d offen\n\n", len(tasks), pending) + + for _, task := range tasks { + if task.Completed { + fmt.Printf("✅ Überspringe (bereits erledigt): %s\n", task.Title) + continue + } + + fmt.Printf("\n🔄 Starte Task: %s\n", task.Title) + fmt.Println(strings.Repeat("─", 50)) + + success := false + for attempt := 1; attempt <= maxRetries; attempt++ { + if attempt > 1 { + fmt.Printf("🔁 Retry %d/%d...\n", attempt, maxRetries) + time.Sleep(time.Duration(attempt) * 2 * time.Second) // Backoff + } + + err := a.runTask(task) + if err == nil { + success = true + break + } + fmt.Printf("⚠️ Fehler: %v\n", err) + } + + if success { + prd.MarkTaskComplete(a.prdFile, task.Title) + fmt.Printf("✅ Task abgeschlossen: %s\n", task.Title) + } else { + fmt.Printf("❌ Task fehlgeschlagen nach %d Versuchen: %s\n", maxRetries, task.Title) + } + } + + fmt.Println("\n🎉 Alle Tasks abgearbeitet!") + return nil +} + +func (a *AgentLoop) runTask(task prd.Task) error { + // FRISCHER Kontext für jeden Task + messages := []openai.ChatCompletionMessageParamUnion{ + openai.SystemMessage(systemPrompt), + openai.UserMessage(fmt.Sprintf("Task: %s\nArbeitsverzeichnis: %s", task.Title, a.workDir)), + } + + for turn := 0; turn < maxTurns; turn++ { + fmt.Printf(" 💭 Turn %d...\n", turn+1) + + resp, err := a.client.Chat.Completions.New( + 
context.Background(), + openai.ChatCompletionNewParams{ + Model: a.model, + Messages: messages, + }, + ) + if err != nil { + return fmt.Errorf("API-Fehler: %w", err) + } + + response := resp.Choices[0].Message.Content + fmt.Printf(" 🤖 %s\n", truncate(response, 200)) + + // Antwort zur History hinzufügen + messages = append(messages, openai.AssistantMessage(response)) + + // Completion Detection: Layer 1 - Signal Token + if strings.Contains(response, "TASK_COMPLETE") { + return nil // Erfolg! + } + + // Tool Execution + toolOutput, hadTools := ExecuteTools(response, a.workDir) + if hadTools { + fmt.Printf(" 🔧 Tool-Output: %s\n", truncate(toolOutput, 150)) + // Tool-Ergebnis zurück ans LLM + messages = append(messages, openai.UserMessage(toolOutput)) + continue + } + + // Kein Tool, kein TASK_COMPLETE → LLM anstupsen + messages = append(messages, openai.UserMessage( + "Bitte fahre fort. Wenn der Task erledigt ist, schreibe TASK_COMPLETE.", + )) + } + + return fmt.Errorf("maximale Turns (%d) erreicht ohne TASK_COMPLETE", maxTurns) +} + +func truncate(s string, max int) string { + if len(s) <= max { + return s + } + return s[:max] + "..." +} diff --git a/agent/tools.go b/agent/tools.go new file mode 100644 index 0000000..58c07ad --- /dev/null +++ b/agent/tools.go @@ -0,0 +1,88 @@ +package agent + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// Tool-Ergebnis das dem LLM zurückgegeben wird +type ToolResult struct { + Success bool + Output string +} + +// Parst Tool-Calls aus der LLM-Antwort +// Erwartetes Format: +// TOOL:READ_FILE:path/to/file +// TOOL:WRITE_FILE:path/to/file:<<>> +// TOOL:LIST_FILES:. 
// agent/tools.go — line-based tool protocol executed on behalf of the LLM.
//
// Recognized calls (one per line of the model response):
//   TOOL:READ_FILE:path
//   TOOL:WRITE_FILE:path:content   (content may itself contain colons)
//   TOOL:LIST_FILES:path
//
// NOTE(review): the protocol is line-based, so WRITE_FILE content cannot
// contain newlines — multi-line files would need a protocol extension.

// ExecuteTools scans an LLM response for TOOL: lines, executes each call
// relative to workDir, and returns the joined outputs plus whether any
// tool call was present at all.
func ExecuteTools(response string, workDir string) (string, bool) {
	var outputs []string
	found := false

	for _, raw := range strings.Split(response, "\n") {
		line := strings.TrimSpace(raw)
		if !strings.HasPrefix(line, "TOOL:") {
			continue
		}
		found = true
		outputs = append(outputs, executeTool(line, workDir))
	}

	return strings.Join(outputs, "\n"), found
}

// resolvePath joins rel onto workDir and rejects results that escape
// workDir. SECURITY FIX: tool calls come from untrusted model output; the
// original joined paths unchecked, so "../"-style arguments could read or
// write files anywhere on disk.
func resolvePath(workDir, rel string) (string, error) {
	absWork, err := filepath.Abs(workDir)
	if err != nil {
		return "", err
	}
	abs, err := filepath.Abs(filepath.Join(workDir, rel))
	if err != nil {
		return "", err
	}
	if abs != absWork && !strings.HasPrefix(abs, absWork+string(filepath.Separator)) {
		return "", fmt.Errorf("pfad verlässt das Arbeitsverzeichnis: %s", rel)
	}
	return abs, nil
}

// executeTool runs a single TOOL: line and returns the textual result that
// is fed back to the model (errors are returned as text, never panics).
func executeTool(toolCall string, workDir string) string {
	// SplitN(…, 4) keeps colons inside WRITE_FILE content intact.
	parts := strings.SplitN(toolCall, ":", 4)
	if len(parts) < 3 {
		return "ERROR: Ungültiger Tool-Call"
	}

	toolName := parts[1]
	arg1 := parts[2]

	switch toolName {
	case "READ_FILE":
		path, err := resolvePath(workDir, arg1)
		if err != nil {
			return fmt.Sprintf("READ_FILE ERROR: %v", err)
		}
		content, err := os.ReadFile(path)
		if err != nil {
			return fmt.Sprintf("READ_FILE ERROR: %v", err)
		}
		return fmt.Sprintf("READ_FILE %s:\n%s", arg1, string(content))

	case "WRITE_FILE":
		if len(parts) < 4 {
			return "ERROR: WRITE_FILE braucht Inhalt"
		}
		content := parts[3]
		path, err := resolvePath(workDir, arg1)
		if err != nil {
			return fmt.Sprintf("WRITE_FILE ERROR: %v", err)
		}
		// create parent directories on demand
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return fmt.Sprintf("WRITE_FILE ERROR: %v", err)
		}
		if err := os.WriteFile(path, []byte(content), 0644); err != nil {
			return fmt.Sprintf("WRITE_FILE ERROR: %v", err)
		}
		return fmt.Sprintf("WRITE_FILE OK: %s geschrieben", arg1)

	case "LIST_FILES":
		path, err := resolvePath(workDir, arg1)
		if err != nil {
			return fmt.Sprintf("LIST_FILES ERROR: %v", err)
		}
		entries, err := os.ReadDir(path)
		if err != nil {
			return fmt.Sprintf("LIST_FILES ERROR: %v", err)
		}
		names := make([]string, 0, len(entries))
		for _, e := range entries {
			names = append(names, e.Name())
		}
		return fmt.Sprintf("LIST_FILES %s:\n%s", arg1, strings.Join(names, "\n"))
	}

	return fmt.Sprintf("ERROR: Unbekanntes Tool: %s", toolName)
}
github.com/openai/openai-go v1.12.0 // indirect + github.com/tidwall/gjson v1.14.4 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..622fc37 --- /dev/null +++ b/go.sum @@ -0,0 +1,12 @@ +github.com/openai/openai-go v1.12.0 h1:NBQCnXzqOTv5wsgNC36PrFEiskGfO5wccfCWDo9S1U0= +github.com/openai/openai-go v1.12.0/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= diff --git a/main.go b/main.go new file mode 100644 index 0000000..d79449d --- /dev/null +++ b/main.go @@ -0,0 +1,67 @@ +package main + +import ( + "bufio" + "context" + "fmt" + "log" + "os" + "strconv" + "strings" + + "github.com/openai/openai-go" + oaioption "github.com/openai/openai-go/option" + + "llm-agent/agent" +) + +const baseURL = "http://127.0.0.1:12434/v1" + +func selectModel(client *openai.Client) string { + modelsPage, err := client.Models.List(context.Background()) + if err != nil { + log.Fatalf("Fehler beim Abrufen der Modelle: %v", err) + } + models := modelsPage.Data + if len(models) == 
// prd/parser.go — parses the markdown task checklist of a PRD file.

// Task is one checklist item ("- [ ] ..." / "- [x] ...") from the PRD.
type Task struct {
	Title     string // text after the checkbox marker
	Completed bool   // true when the checkbox is ticked ("[x]")
	Index     int    // 0-based position among recognized tasks
}

// Checkbox markers exactly as they appear in the markdown checklist.
const (
	openMarker = "- [ ] "
	doneMarker = "- [x] "
)

// parseTaskLine recognizes a single checklist line (surrounding whitespace
// ignored). The returned Task has no Index set; the second result reports
// whether the line was a task at all.
func parseTaskLine(raw string) (Task, bool) {
	line := strings.TrimSpace(raw)
	switch {
	case strings.HasPrefix(line, openMarker):
		return Task{Title: strings.TrimPrefix(line, openMarker)}, true
	case strings.HasPrefix(line, doneMarker):
		return Task{Title: strings.TrimPrefix(line, doneMarker), Completed: true}, true
	default:
		return Task{}, false
	}
}

// ParseTasks reads the PRD file and returns all checklist tasks in file
// order. Lines that are not checklist items are ignored.
func ParseTasks(filepath string) ([]Task, error) {
	file, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	var tasks []Task
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		if t, ok := parseTaskLine(scanner.Text()); ok {
			t.Index = len(tasks)
			tasks = append(tasks, t)
		}
	}
	return tasks, scanner.Err()
}

// markTitleDone ticks the FIRST open checkbox with the given title.
// BUG FIX: the original used strings.ReplaceAll, which ticked every
// duplicate of a repeated title at once, so completing the second
// occurrence later silently became a no-op.
func markTitleDone(content, title string) string {
	return strings.Replace(content, openMarker+title, doneMarker+title, 1)
}

// MarkTaskComplete rewrites the PRD file with the named task checked off.
// An unknown title leaves the content unchanged (the file is still
// rewritten with identical bytes).
func MarkTaskComplete(filepath string, taskTitle string) error {
	content, err := os.ReadFile(filepath)
	if err != nil {
		return err
	}
	updated := markTitleDone(string(content), taskTitle)
	return os.WriteFile(filepath, []byte(updated), 0644)
}