From d0b0b4f8bd63ad7b15b6102b2375722ccbe243cb Mon Sep 17 00:00:00 2001 From: "Christoph K." Date: Tue, 7 Apr 2026 16:59:50 +0200 Subject: [PATCH] Convert backend from submodule to regular directory Remove submodule tracking; backend is now a plain directory in the repo. Also update deploy workflow: remove --recurse-submodules. Co-Authored-By: Claude Sonnet 4.6 --- .gitea/workflows/deploy.yml | 15 +- backend | 1 - backend/.env.example | 2 + backend/.gitignore | 29 ++++ backend/CLAUDE.md | 109 +++++++++++++ backend/Dockerfile | 12 ++ backend/cmd/createuser/main.go | 55 +++++++ backend/cmd/server/main.go | 87 ++++++++++ backend/go.mod | 20 +++ backend/go.sum | 87 ++++++++++ backend/internal/api/ingest.go | 134 +++++++++++++++ backend/internal/api/journal.go | 164 +++++++++++++++++++ backend/internal/api/middleware.go | 43 +++++ backend/internal/api/query.go | 102 ++++++++++++ backend/internal/api/response.go | 21 +++ backend/internal/api/router.go | 106 ++++++++++++ backend/internal/api/spa.go | 53 ++++++ backend/internal/api/static/day.js | 47 ++++++ backend/internal/api/static/style.css | 54 +++++++ backend/internal/api/templates/base.html | 15 ++ backend/internal/api/templates/day.html | 104 ++++++++++++ backend/internal/api/templates/days.html | 46 ++++++ backend/internal/api/templates/login.html | 21 +++ backend/internal/api/webapp/index.html | 1 + backend/internal/api/webui.go | 188 ++++++++++++++++++++++ backend/internal/auth/auth.go | 137 ++++++++++++++++ backend/internal/db/db.go | 41 +++++ backend/internal/db/journal.go | 109 +++++++++++++ backend/internal/db/schema.sql | 94 +++++++++++ backend/internal/db/stops.go | 60 +++++++ backend/internal/db/suggestions.go | 54 +++++++ backend/internal/db/trackpoints.go | 163 +++++++++++++++++++ backend/internal/domain/models.go | 85 ++++++++++ backend/start.sh | 8 + backend/stop.sh | 12 ++ 35 files changed, 2271 insertions(+), 8 deletions(-) delete mode 160000 backend create mode 100644 backend/.env.example create 
mode 100644 backend/.gitignore create mode 100644 backend/CLAUDE.md create mode 100644 backend/Dockerfile create mode 100644 backend/cmd/createuser/main.go create mode 100644 backend/cmd/server/main.go create mode 100644 backend/go.mod create mode 100644 backend/go.sum create mode 100644 backend/internal/api/ingest.go create mode 100644 backend/internal/api/journal.go create mode 100644 backend/internal/api/middleware.go create mode 100644 backend/internal/api/query.go create mode 100644 backend/internal/api/response.go create mode 100644 backend/internal/api/router.go create mode 100644 backend/internal/api/spa.go create mode 100644 backend/internal/api/static/day.js create mode 100644 backend/internal/api/static/style.css create mode 100644 backend/internal/api/templates/base.html create mode 100644 backend/internal/api/templates/day.html create mode 100644 backend/internal/api/templates/days.html create mode 100644 backend/internal/api/templates/login.html create mode 100644 backend/internal/api/webapp/index.html create mode 100644 backend/internal/api/webui.go create mode 100644 backend/internal/auth/auth.go create mode 100644 backend/internal/db/db.go create mode 100644 backend/internal/db/journal.go create mode 100644 backend/internal/db/schema.sql create mode 100644 backend/internal/db/stops.go create mode 100644 backend/internal/db/suggestions.go create mode 100644 backend/internal/db/trackpoints.go create mode 100644 backend/internal/domain/models.go create mode 100755 backend/start.sh create mode 100755 backend/stop.sh diff --git a/.gitea/workflows/deploy.yml b/.gitea/workflows/deploy.yml index 8544adb..3e1370b 100644 --- a/.gitea/workflows/deploy.yml +++ b/.gitea/workflows/deploy.yml @@ -9,18 +9,19 @@ jobs: runs-on: self-hosted steps: - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: recursive + - name: Pull code + run: | + if [ -d "${{ secrets.DEPLOY_DIR }}/.git" ]; then + git -C ${{ secrets.DEPLOY_DIR }} pull --recurse-submodules + 
else + git clone --recurse-submodules http://192.168.1.4:3000/christoph/pamietnik.git ${{ secrets.DEPLOY_DIR }} + fi - name: Write .env run: printf 'DB_PASSWORD=%s\n' '${{ secrets.DB_PASSWORD }}' > ${{ secrets.DEPLOY_DIR }}/.env - name: Build & Deploy - run: | - cp -r ${{ github.workspace }}/. ${{ secrets.DEPLOY_DIR }}/ - docker compose -f ${{ secrets.DEPLOY_DIR }}/docker-compose.yml up --build -d + run: docker compose -f ${{ secrets.DEPLOY_DIR }}/docker-compose.yml up --build -d - name: Health check run: | diff --git a/backend b/backend deleted file mode 160000 index c3d432e..0000000 --- a/backend +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c3d432e24d9a1707d7d3bbd5d84146df9133a98a diff --git a/backend/.env.example b/backend/.env.example new file mode 100644 index 0000000..592a636 --- /dev/null +++ b/backend/.env.example @@ -0,0 +1,2 @@ +DATABASE_URL=postgres://ralph:ralph@localhost:5432/ralph?sslmode=disable +LISTEN_ADDR=:8080 diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..268e6b7 --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1,29 @@ +# Binaries +/server +/migrate +/createuser +*.exe + +# Build output +dist/ +bin/ + +# Uploads +uploads/ + +# Environment / Secrets +.env +*.env + +# IDE +.idea/ +.vscode/ +*.swp + +# OS +.DS_Store + +# Go test cache / coverage +*.test +*.out +coverage.html diff --git a/backend/CLAUDE.md b/backend/CLAUDE.md new file mode 100644 index 0000000..9ae87de --- /dev/null +++ b/backend/CLAUDE.md @@ -0,0 +1,109 @@ +# CLAUDE.md — Pamietnik Backend (Go Server) + +## Stack + +Language: Go +DB: PostgreSQL +API-Doc: OpenAPI 3.1 (openapi.yaml) +Auth: Session Cookie (Web UI); API-Key oder JWT (Android Upload, TBD) +Hashing: Argon2id (Passwörter) +Geocoding: Nominatim (OSM) mit Cache + Rate-Limit; Provider austauschbar +Maps: OpenStreetMap Tiles (konfigurierbar, serverseitig) +Dev: docker-compose (API + PostgreSQL) + +--- + +## Kern-Features (Backend) + +1. 
REST API Ingest: Single + Batch Trackpoints (Idempotenz via event_id) +2. Idempotenz/Dedupe: Unique Key (device_id, event_id); Duplikate = 200 OK +3. Stop Detection: Aufenthalte erkennen (minDuration + radiusMeters konfigurierbar) +4. Suggestions: Aus Stops Vorschläge ableiten + speichern +5. Reverse-Geocoding: Nominatim gecached, Provider austauschbar via Config +6. Web UI: Login (Session Cookie), Tagesübersicht, Tagesdetail, Karte +7. Auth: Argon2id Passwort-Hashing, Session-Store in PostgreSQL + +--- + +## API Endpoints + +Ingest: + POST /v1/trackpoints <- Single Trackpoint + POST /v1/trackpoints:batch <- Batch Trackpoints + GET /healthz + GET /readyz + +Query (Auth required): + GET /v1/days?from=YYYY-MM-DD&to=YYYY-MM-DD + GET /v1/trackpoints?date=YYYY-MM-DD + GET /v1/stops?date=YYYY-MM-DD + GET /v1/suggestions?date=YYYY-MM-DD + +Web UI (Session Cookie, serverseitig gerendert): + GET /login + POST /login + POST /logout + GET /days + GET /days/{yyyy-mm-dd} + +--- + +## Datenmodell (Kern) + +Trackpoint: + event_id string (UUID, client-generated) + device_id string + trip_id string + timestamp RFC3339 oder epochMillis (TBD) + lat, lon float64 + source "gps" | "manual" + note string (optional) + +Stop: + stop_id string + device_id, trip_id + start_ts, end_ts + center_lat, center_lon + duration_s int + place_label string (optional, Nominatim) + +Suggestion: + suggestion_id + stop_id + type "highlight" | "name_place" | "add_note" + title/text string + created_at, dismissed_at + +--- + +## Architektur-Prinzipien + +- Idempotenz zuerst: Kein Duplicate Insert, immer event_id prüfen +- Geocoding nur ereignisbasiert (pro Stop), niemals periodisch/bulk +- Geocoding-Provider über Config austauschbar (kein Hardcode) +- Sessions serverseitig in PostgreSQL (invalidierbar bei Logout) +- Stop Detection Parameter (minDuration, radiusMeters) konfigurierbar +- OpenAPI immer aktuell halten; Änderungen nur via PR + CI Validation + +--- + +## Offene Entscheidungen (TBD) + +- timestamp 
Format: epochMillis vs RFC3339 +- Android Upload Auth: X-API-Key vs JWT +- Payload: JSON vs Protobuf +- Batch limits (max items, max bytes) +- Retention Policy (Trackpoints löschen nach X Tagen) +- Stop-Detection Parameter (Mindestdauer, Radius) +- Geocoding Provider: Nominatim public vs self-hosted vs Alternative + +--- + +## Nächste Tasks (Reihenfolge) + +- [ ] T024 REST API finalisieren (Endpoints, Fehlerformat, Limits) +- [ ] T027 PostgreSQL Schema + Migrationen + Indizes +- [ ] T028 Idempotenz implementieren (unique event_id pro device) +- [ ] T029 Observability (Logs/Metrics), Health/Ready +- [ ] T030 docker-compose lokal (API + PostgreSQL) + Minimal-Client +- [ ] T050 Auth-Konzept festlegen diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..d429527 --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:1.25-alpine AS builder +WORKDIR /app +COPY go.mod go.sum ./ +RUN go mod download +COPY . . +RUN CGO_ENABLED=0 GOOS=linux go build -o /server ./cmd/server +RUN CGO_ENABLED=0 GOOS=linux go build -o /createuser ./cmd/createuser + +FROM gcr.io/distroless/static-debian12 +COPY --from=builder /server /server +COPY --from=builder /createuser /createuser +ENTRYPOINT ["/server"] diff --git a/backend/cmd/createuser/main.go b/backend/cmd/createuser/main.go new file mode 100644 index 0000000..2490d17 --- /dev/null +++ b/backend/cmd/createuser/main.go @@ -0,0 +1,55 @@ +// cmd/createuser creates a new user in the database. +// Usage: DATABASE_URL=... 
go run ./cmd/createuser +package main + +import ( + "context" + "fmt" + "os" + + "github.com/jackc/pgx/v5" + + "github.com/jacek/pamietnik/backend/internal/auth" +) + +func main() { + if len(os.Args) != 3 { + fmt.Fprintln(os.Stderr, "usage: createuser ") + os.Exit(1) + } + username := os.Args[1] + password := os.Args[2] + + if len(password) < 8 { + fmt.Fprintln(os.Stderr, "password must be at least 8 characters") + os.Exit(1) + } + + dsn := os.Getenv("DATABASE_URL") + if dsn == "" { + dsn = "postgres://pamietnik:pamietnik@localhost:5432/pamietnik?sslmode=disable" + } + + conn, err := pgx.Connect(context.Background(), dsn) + if err != nil { + fmt.Fprintln(os.Stderr, "db error:", err) + os.Exit(1) + } + defer conn.Close(context.Background()) + + hash, err := auth.HashPassword(password) + if err != nil { + fmt.Fprintln(os.Stderr, "hash error:", err) + os.Exit(1) + } + + _, err = conn.Exec(context.Background(), + `INSERT INTO users (username, password_hash) VALUES ($1, $2)`, + username, hash, + ) + if err != nil { + fmt.Fprintln(os.Stderr, "insert error:", err) + os.Exit(1) + } + fmt.Printf("user '%s' created\n", username) +} diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go new file mode 100644 index 0000000..9f40769 --- /dev/null +++ b/backend/cmd/server/main.go @@ -0,0 +1,87 @@ +package main + +import ( + "context" + "log/slog" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/jacek/pamietnik/backend/internal/api" + "github.com/jacek/pamietnik/backend/internal/auth" + "github.com/jacek/pamietnik/backend/internal/db" +) + +func main() { + logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + slog.SetDefault(logger) + + dsn := getenv("DATABASE_URL", "postgres://pamietnik:pamietnik@localhost:5432/pamietnik?sslmode=disable") + addr := getenv("LISTEN_ADDR", ":8080") + uploadDir := getenv("UPLOAD_DIR", "./uploads") + + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + 
pool, err := db.NewPool(ctx, dsn) + if err != nil { + slog.Error("connect db", "err", err) + os.Exit(1) + } + defer pool.Close() + slog.Info("database connected") + + if err := db.InitSchema(ctx, pool); err != nil { + slog.Error("init schema", "err", err) + os.Exit(1) + } + slog.Info("schema ready") + + if err := os.MkdirAll(uploadDir, 0o755); err != nil { + slog.Error("create upload dir", "err", err) + os.Exit(1) + } + + authStore := auth.NewStore(pool) + tpStore := db.NewTrackpointStore(pool) + stopStore := db.NewStopStore(pool) + suggStore := db.NewSuggestionStore(pool) + journalStore := db.NewJournalStore(pool) + + router := api.NewRouter(authStore, tpStore, stopStore, suggStore, journalStore, uploadDir) + + srv := &http.Server{ + Addr: addr, + Handler: router, + ReadTimeout: 15 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 60 * time.Second, + } + + go func() { + slog.Info("server starting", "addr", addr) + if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { + slog.Error("server error", "err", err) + cancel() + } + }() + + <-ctx.Done() + slog.Info("shutting down") + + shutCtx, shutCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer shutCancel() + if err := srv.Shutdown(shutCtx); err != nil { + slog.Error("shutdown error", "err", err) + } + slog.Info("server stopped") +} + +func getenv(key, fallback string) string { + if v := os.Getenv(key); v != "" { + return v + } + return fallback +} diff --git a/backend/go.mod b/backend/go.mod new file mode 100644 index 0000000..c77476b --- /dev/null +++ b/backend/go.mod @@ -0,0 +1,20 @@ +module github.com/jacek/pamietnik/backend + +go 1.25.7 + +require ( + github.com/go-chi/chi/v5 v5.2.5 + github.com/golang-migrate/migrate/v4 v4.19.1 + github.com/jackc/pgx/v5 v5.8.0 + golang.org/x/crypto v0.48.0 +) + +require ( + github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + 
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/text v0.34.0 // indirect +) diff --git a/backend/go.sum b/backend/go.sum new file mode 100644 index 0000000..e279519 --- /dev/null +++ b/backend/go.sum @@ -0,0 +1,87 @@ +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dhui/dktest v0.4.6 h1:+DPKyScKSEp3VLtbMDHcUq6V5Lm5zfZZVb0Sk7Ahom4= +github.com/dhui/dktest v0.4.6/go.mod h1:JHTSYDtKkvFNFHJKqCzVzqXecyv+tKt8EzceOmQOgbU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= +github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.19.1 h1:OCyb44lFuQfYXYLx1SCxPZQGU7mcaZ7gH9yH4jSFbBA= +github.com/golang-migrate/migrate/v4 v4.19.1/go.mod h1:CTcgfjxhaUtsLipnLoQRWCrjYXycRz/g5+RWDuYgPrE= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= 
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= +github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.34.0 
h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/backend/internal/api/ingest.go b/backend/internal/api/ingest.go new file mode 100644 index 0000000..37db2f4 --- /dev/null +++ b/backend/internal/api/ingest.go @@ -0,0 +1,134 @@ +package api + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/jacek/pamietnik/backend/internal/db" + "github.com/jacek/pamietnik/backend/internal/domain" +) + +type trackpointInput struct { + EventID string `json:"event_id"` + DeviceID string `json:"device_id"` + TripID string `json:"trip_id"` + Timestamp string `json:"timestamp"` // RFC3339 + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` + Source string `json:"source"` + Note string `json:"note,omitempty"` + AccuracyM *float64 `json:"accuracy_m,omitempty"` + SpeedMps *float64 `json:"speed_mps,omitempty"` + BearingDeg *float64 `json:"bearing_deg,omitempty"` + AltitudeM *float64 `json:"altitude_m,omitempty"` +} + +func (t trackpointInput) toDomain() (domain.Trackpoint, error) { + ts, err := time.Parse(time.RFC3339, t.Timestamp) + if err != nil { + return domain.Trackpoint{}, err + } + src := t.Source + if src == "" { + src = "gps" + } + return domain.Trackpoint{ + EventID: t.EventID, + DeviceID: t.DeviceID, + TripID: t.TripID, + Timestamp: ts, + Lat: t.Lat, + Lon: t.Lon, + Source: src, + Note: t.Note, + AccuracyM: t.AccuracyM, + SpeedMps: t.SpeedMps, + BearingDeg: t.BearingDeg, + AltitudeM: t.AltitudeM, + }, nil +} + +type batchResponse struct { + ServerTime string `json:"server_time"` + AcceptedIDs []string 
`json:"accepted_ids"` + Rejected []db.RejectedItem `json:"rejected"` +} + +// HandleSingleTrackpoint handles POST /v1/trackpoints +func HandleSingleTrackpoint(store *db.TrackpointStore) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + var input trackpointInput + if err := json.NewDecoder(r.Body).Decode(&input); err != nil { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "invalid JSON") + return + } + + point, err := input.toDomain() + if err != nil { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "invalid timestamp: "+err.Error()) + return + } + + userID := userIDFromContext(r.Context()) + accepted, rejected, err := store.UpsertBatch(r.Context(), userID, []domain.Trackpoint{point}) + if err != nil { + writeError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "database error") + return + } + + writeJSON(w, http.StatusOK, batchResponse{ + ServerTime: time.Now().UTC().Format(time.RFC3339), + AcceptedIDs: accepted, + Rejected: rejected, + }) + } +} + +// HandleBatchTrackpoints handles POST /v1/trackpoints:batch +func HandleBatchTrackpoints(store *db.TrackpointStore) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + var inputs []trackpointInput + if err := json.NewDecoder(r.Body).Decode(&inputs); err != nil { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "invalid JSON") + return + } + if len(inputs) == 0 { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "empty batch") + return + } + if len(inputs) > 500 { + writeError(w, http.StatusBadRequest, "TOO_LARGE", "batch exceeds 500 items") + return + } + + points := make([]domain.Trackpoint, 0, len(inputs)) + var parseRejected []db.RejectedItem + for _, inp := range inputs { + p, err := inp.toDomain() + if err != nil { + parseRejected = append(parseRejected, db.RejectedItem{ + EventID: inp.EventID, + Code: "INVALID_TIMESTAMP", + Message: err.Error(), + }) + continue + } + points = append(points, p) + } + + userID := 
userIDFromContext(r.Context()) + accepted, rejected, err := store.UpsertBatch(r.Context(), userID, points) + if err != nil { + writeError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "database error") + return + } + rejected = append(rejected, parseRejected...) + + writeJSON(w, http.StatusOK, batchResponse{ + ServerTime: time.Now().UTC().Format(time.RFC3339), + AcceptedIDs: accepted, + Rejected: rejected, + }) + } +} diff --git a/backend/internal/api/journal.go b/backend/internal/api/journal.go new file mode 100644 index 0000000..fbfd2d5 --- /dev/null +++ b/backend/internal/api/journal.go @@ -0,0 +1,164 @@ +package api + +import ( + "fmt" + "io" + "log/slog" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/jacek/pamietnik/backend/internal/db" + "github.com/jacek/pamietnik/backend/internal/domain" +) + +const ( + maxUploadSize = 32 << 20 // 32 MB per request + maxSingleImage = 10 << 20 // 10 MB per image +) + +var allowedMIME = map[string]string{ + "image/jpeg": ".jpg", + "image/png": ".png", + "image/webp": ".webp", + "image/heic": ".heic", +} + +type JournalHandler struct { + store *db.JournalStore + uploadDir string +} + +func NewJournalHandler(store *db.JournalStore, uploadDir string) *JournalHandler { + return &JournalHandler{store: store, uploadDir: uploadDir} +} + +// HandleCreateEntry handles POST /entries (multipart/form-data). 
+func (h *JournalHandler) HandleCreateEntry(w http.ResponseWriter, r *http.Request) { + if err := r.ParseMultipartForm(maxUploadSize); err != nil { + http.Error(w, "Formular zu groß", http.StatusRequestEntityTooLarge) + return + } + + userID := userIDFromContext(r.Context()) + date := strings.TrimSpace(r.FormValue("date")) + entryTime := strings.TrimSpace(r.FormValue("time")) + title := strings.TrimSpace(r.FormValue("title")) + description := strings.TrimSpace(r.FormValue("description")) + + if date == "" || entryTime == "" { + http.Error(w, "Datum und Uhrzeit sind Pflichtfelder", http.StatusBadRequest) + return + } + + entry := domain.JournalEntry{ + UserID: userID, + EntryDate: date, + EntryTime: entryTime, + Title: title, + Description: description, + } + + if lat := r.FormValue("lat"); lat != "" { + var v float64 + if _, err := fmt.Sscanf(lat, "%f", &v); err == nil { + entry.Lat = &v + } + } + if lon := r.FormValue("lon"); lon != "" { + var v float64 + if _, err := fmt.Sscanf(lon, "%f", &v); err == nil { + entry.Lon = &v + } + } + + saved, err := h.store.InsertEntry(r.Context(), entry) + if err != nil { + http.Error(w, "Datenbankfehler", http.StatusInternalServerError) + return + } + + // Handle image uploads + if r.MultipartForm != nil && r.MultipartForm.File != nil { + files := r.MultipartForm.File["images"] + for _, fh := range files { + if fh.Size > maxSingleImage { + continue // skip oversized images silently + } + f, err := fh.Open() + if err != nil { + continue + } + + // Detect MIME type from first 512 bytes + buf := make([]byte, 512) + n, _ := f.Read(buf) + mime := http.DetectContentType(buf[:n]) + ext, ok := allowedMIME[mime] + if !ok { + f.Close() + continue + } + + filename := saved.EntryID + "_" + fh.Filename + filename = sanitizeFilename(filename) + ext + destPath := filepath.Join(h.uploadDir, filename) + + out, err := os.Create(destPath) + if err != nil { + f.Close() + continue + } + + // Write already-read bytes + rest; clean up file on any 
write error + if _, err := out.Write(buf[:n]); err != nil { + out.Close() + f.Close() + os.Remove(destPath) + continue + } + if _, err := io.Copy(out, f); err != nil { + out.Close() + f.Close() + os.Remove(destPath) + continue + } + out.Close() + f.Close() + + img := domain.JournalImage{ + EntryID: saved.EntryID, + Filename: filename, + OriginalName: fh.Filename, + MimeType: mime, + SizeBytes: fh.Size, + } + if _, err := h.store.InsertImage(r.Context(), img); err != nil { + slog.Error("insert image", "entry_id", saved.EntryID, "filename", filename, "err", err) + os.Remove(destPath) + } + } + } + + http.Redirect(w, r, "/days/"+date, http.StatusSeeOther) +} + +// sanitizeFilename strips path separators and non-printable characters. +func sanitizeFilename(name string) string { + name = filepath.Base(name) + var b strings.Builder + for _, r := range name { + if r == '/' || r == '\\' || r == ':' || r == '*' || r == '?' || r == '"' || r == '<' || r == '>' || r == '|' { + b.WriteRune('_') + } else { + b.WriteRune(r) + } + } + s := b.String() + // strip extension — we append the detected one + if idx := strings.LastIndex(s, "."); idx > 0 { + s = s[:idx] + } + return s +} diff --git a/backend/internal/api/middleware.go b/backend/internal/api/middleware.go new file mode 100644 index 0000000..d24ba6d --- /dev/null +++ b/backend/internal/api/middleware.go @@ -0,0 +1,43 @@ +package api + +import ( + "context" + "net/http" + + "github.com/jacek/pamietnik/backend/internal/auth" +) + +type contextKey string + +const ctxUserID contextKey = "user_id" + +const sessionCookieName = "session" + +// RequireAuth is a middleware that validates the session cookie. 
+func RequireAuth(authStore *auth.Store) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie(sessionCookieName) + if err != nil { + writeError(w, http.StatusUnauthorized, "UNAUTHORIZED", "login required") + return + } + sess, err := authStore.GetSession(r.Context(), cookie.Value) + if err != nil { + writeError(w, http.StatusUnauthorized, "UNAUTHORIZED", "invalid or expired session") + return + } + ctx := context.WithValue(r.Context(), ctxUserID, sess.UserID) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + } +} + +func userIDFromContext(ctx context.Context) string { + v, _ := ctx.Value(ctxUserID).(string) + return v +} + +func contextWithUserID(ctx context.Context, userID string) context.Context { + return context.WithValue(ctx, ctxUserID, userID) +} diff --git a/backend/internal/api/query.go b/backend/internal/api/query.go new file mode 100644 index 0000000..dc0b8d6 --- /dev/null +++ b/backend/internal/api/query.go @@ -0,0 +1,102 @@ +package api + +import ( + "log/slog" + "net/http" + + "github.com/jacek/pamietnik/backend/internal/db" + "github.com/jacek/pamietnik/backend/internal/domain" +) + +// HandleListDays handles GET /v1/days?from=YYYY-MM-DD&to=YYYY-MM-DD +func HandleListDays(store *db.TrackpointStore) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + userID := userIDFromContext(r.Context()) + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + if from == "" || to == "" { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "from and to are required (YYYY-MM-DD)") + return + } + + days, err := store.ListDays(r.Context(), userID, from, to) + if err != nil { + slog.Error("list days", "user_id", userID, "err", err) + writeError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "database error") + return + } + if days == nil { + days = []domain.DaySummary{} + } + writeJSON(w, http.StatusOK, 
days) + } +} + +// HandleListTrackpoints handles GET /v1/trackpoints?date=YYYY-MM-DD +func HandleListTrackpoints(store *db.TrackpointStore) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + userID := userIDFromContext(r.Context()) + date := r.URL.Query().Get("date") + if date == "" { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "date is required (YYYY-MM-DD)") + return + } + + points, err := store.ListByDate(r.Context(), userID, date) + if err != nil { + slog.Error("list trackpoints", "user_id", userID, "date", date, "err", err) + writeError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "database error") + return + } + if points == nil { + points = []domain.Trackpoint{} + } + writeJSON(w, http.StatusOK, points) + } +} + +// HandleListStops handles GET /v1/stops?date=YYYY-MM-DD +func HandleListStops(store *db.StopStore) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + userID := userIDFromContext(r.Context()) + date := r.URL.Query().Get("date") + if date == "" { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "date is required (YYYY-MM-DD)") + return + } + + stops, err := store.ListByDate(r.Context(), userID, date) + if err != nil { + slog.Error("list stops", "user_id", userID, "date", date, "err", err) + writeError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "database error") + return + } + if stops == nil { + stops = []domain.Stop{} + } + writeJSON(w, http.StatusOK, stops) + } +} + +// HandleListSuggestions handles GET /v1/suggestions?date=YYYY-MM-DD +func HandleListSuggestions(store *db.SuggestionStore) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + userID := userIDFromContext(r.Context()) + date := r.URL.Query().Get("date") + if date == "" { + writeError(w, http.StatusBadRequest, "BAD_REQUEST", "date is required (YYYY-MM-DD)") + return + } + + suggestions, err := store.ListByDate(r.Context(), userID, date) + if err != nil { + slog.Error("list 
suggestions", "user_id", userID, "date", date, "err", err) + writeError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "database error") + return + } + if suggestions == nil { + suggestions = []domain.Suggestion{} + } + writeJSON(w, http.StatusOK, suggestions) + } +} diff --git a/backend/internal/api/response.go b/backend/internal/api/response.go new file mode 100644 index 0000000..2fee5f5 --- /dev/null +++ b/backend/internal/api/response.go @@ -0,0 +1,21 @@ +package api + +import ( + "encoding/json" + "net/http" +) + +func writeJSON(w http.ResponseWriter, status int, v any) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + json.NewEncoder(w).Encode(v) +} + +type errorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func writeError(w http.ResponseWriter, status int, code, message string) { + writeJSON(w, status, errorResponse{Code: code, Message: message}) +} diff --git a/backend/internal/api/router.go b/backend/internal/api/router.go new file mode 100644 index 0000000..8487c5d --- /dev/null +++ b/backend/internal/api/router.go @@ -0,0 +1,106 @@ +package api + +import ( + "net/http" + + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + + "github.com/jacek/pamietnik/backend/internal/auth" + "github.com/jacek/pamietnik/backend/internal/db" +) + +func NewRouter( + authStore *auth.Store, + tpStore *db.TrackpointStore, + stopStore *db.StopStore, + suggStore *db.SuggestionStore, + journalStore *db.JournalStore, + uploadDir string, +) http.Handler { + r := chi.NewRouter() + r.Use(middleware.RealIP) + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + + webUI := NewWebUI(authStore, tpStore, stopStore, journalStore) + journalHandler := NewJournalHandler(journalStore, uploadDir) + authMW := RequireAuth(authStore) + webAuthMW := requireWebAuth(authStore) + + // Health + r.Get("/healthz", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok")) + }) + 
r.Get("/readyz", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok")) + }) + + // Ingest (session auth; Android API-Key auth TBD) + r.Group(func(r chi.Router) { + r.Use(authMW) + r.Post("/v1/trackpoints", HandleSingleTrackpoint(tpStore)) + r.Post("/v1/trackpoints:batch", HandleBatchTrackpoints(tpStore)) + }) + + // Query API (session auth) + r.Group(func(r chi.Router) { + r.Use(authMW) + r.Get("/v1/days", HandleListDays(tpStore)) + r.Get("/v1/trackpoints", HandleListTrackpoints(tpStore)) + r.Get("/v1/stops", HandleListStops(stopStore)) + r.Get("/v1/suggestions", HandleListSuggestions(suggStore)) + }) + + // Static assets (CSS etc.) + r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.FS(staticFS())))) + + // Web UI + r.Get("/login", webUI.HandleGetLogin) + r.Post("/login", webUI.HandlePostLogin) + r.Post("/logout", webUI.HandleLogout) + + r.Group(func(r chi.Router) { + r.Use(webAuthMW) + r.Get("/days", webUI.HandleDaysList) + r.Get("/days/redirect", webUI.HandleDaysRedirect) + r.Get("/days/{date}", webUI.HandleDayDetail) + r.Post("/entries", journalHandler.HandleCreateEntry) + }) + + // Serve uploaded images + r.Handle("/uploads/*", http.StripPrefix("/uploads/", http.FileServer(http.Dir(uploadDir)))) + + // SPA (Vite webapp) — served under /app/* + spaPrefix := "/app" + r.Handle(spaPrefix, http.RedirectHandler(spaPrefix+"/", http.StatusMovedPermanently)) + r.Handle(spaPrefix+"/*", http.StripPrefix(spaPrefix, SPAHandler(spaPrefix))) + + // Redirect root to Go Web UI /days + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/days", http.StatusSeeOther) + }) + + return r +} + +// requireWebAuth redirects to /login for unauthenticated web users (HTML response). 
+func requireWebAuth(authStore *auth.Store) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie(sessionCookieName) + if err != nil { + http.Redirect(w, r, "/login", http.StatusSeeOther) + return + } + sess, err := authStore.GetSession(r.Context(), cookie.Value) + if err != nil { + http.Redirect(w, r, "/login", http.StatusSeeOther) + return + } + ctx := r.Context() + ctx = contextWithUserID(ctx, sess.UserID) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + } +} diff --git a/backend/internal/api/spa.go b/backend/internal/api/spa.go new file mode 100644 index 0000000..997166b --- /dev/null +++ b/backend/internal/api/spa.go @@ -0,0 +1,53 @@ +package api + +import ( + "embed" + "io/fs" + "net/http" + "path/filepath" + "strings" +) + +// spaFS holds the built Vite SPA. +// The directory backend/internal/api/webapp/ is populated by the Docker +// multi-stage build (node → copy dist → go build). +// A placeholder file keeps the embed valid when building without Docker. + +//go:embed webapp +var spaFS embed.FS + +// SPAHandler serves the Vite SPA under the given prefix (e.g. "/app"). +// Static assets (paths with file extensions) are served directly. +// All other paths fall back to index.html for client-side routing. 
+func SPAHandler(prefix string) http.Handler { + sub, err := fs.Sub(spaFS, "webapp") + if err != nil { + return http.NotFoundHandler() + } + fileServer := http.FileServer(http.FS(sub)) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Strip the mount prefix to get the file path + path := strings.TrimPrefix(r.URL.Path, prefix) + if path == "" || path == "/" { + // Serve index.html + r2 := r.Clone(r.Context()) + r2.URL.Path = "/index.html" + fileServer.ServeHTTP(w, r2) + return + } + + // Has a file extension → serve asset directly (JS, CSS, fonts, …) + if filepath.Ext(path) != "" { + r2 := r.Clone(r.Context()) + r2.URL.Path = path + fileServer.ServeHTTP(w, r2) + return + } + + // SPA route → serve index.html + r2 := r.Clone(r.Context()) + r2.URL.Path = "/index.html" + fileServer.ServeHTTP(w, r2) + }) +} diff --git a/backend/internal/api/static/day.js b/backend/internal/api/static/day.js new file mode 100644 index 0000000..94e1858 --- /dev/null +++ b/backend/internal/api/static/day.js @@ -0,0 +1,47 @@ +// GPS button +document.getElementById('btn-gps')?.addEventListener('click', function () { + const status = document.getElementById('gps-status'); + if (!navigator.geolocation) { + status.textContent = '// GPS nicht verfügbar'; + return; + } + status.textContent = '// Standort wird ermittelt...'; + navigator.geolocation.getCurrentPosition( + function (pos) { + document.getElementById('entry-lat').value = pos.coords.latitude.toFixed(6); + document.getElementById('entry-lon').value = pos.coords.longitude.toFixed(6); + status.textContent = '// Standort gesetzt (' + pos.coords.accuracy.toFixed(0) + ' m Genauigkeit)'; + }, + function (err) { + status.textContent = '// Fehler: ' + err.message; + }, + { enableHighAccuracy: true, timeout: 10000 } + ); +}); + +// Set current time as default +(function () { + const input = document.getElementById('entry-time'); + if (input && !input.value) { + const now = new Date(); + const hh = 
String(now.getHours()).padStart(2, '0'); + const mm = String(now.getMinutes()).padStart(2, '0'); + input.value = hh + ':' + mm; + } +})(); + +// Image preview +document.getElementById('image-input')?.addEventListener('change', function () { + const preview = document.getElementById('image-preview'); + preview.innerHTML = ''; + Array.from(this.files).forEach(function (file) { + if (!file.type.startsWith('image/')) return; + const reader = new FileReader(); + reader.onload = function (e) { + const img = document.createElement('img'); + img.src = e.target.result; + preview.appendChild(img); + }; + reader.readAsDataURL(file); + }); +}); diff --git a/backend/internal/api/static/style.css b/backend/internal/api/static/style.css new file mode 100644 index 0000000..e6a337e --- /dev/null +++ b/backend/internal/api/static/style.css @@ -0,0 +1,54 @@ +/* Font + monochrome override */ +:root { + --pico-font-family: 'Courier New', Courier, monospace; + --pico-font-size: 14px; + --pico-primary: #111; + --pico-primary-background: #111; + --pico-primary-border: #111; + --pico-primary-hover: #333; + --pico-primary-hover-background: #333; + --pico-primary-hover-border: #333; + --pico-primary-focus: rgba(0,0,0,.25); + --pico-primary-inverse: #fff; + --pico-primary-underline: rgba(0,0,0,.5); +} + +h1 { font-size: 1.4rem; font-weight: normal; letter-spacing: .05em; } +h2 { font-size: 1rem; font-weight: normal; letter-spacing: .05em; } + +.err { color: #c00; } +.source-gps { color: #060; } +.source-manual { color: #888; } + +/* Top bar */ +.page-header { display: flex; justify-content: space-between; align-items: baseline; margin-bottom: 1.5rem; } + +/* GPS row */ +.gps-row { display: flex; gap: .4rem; align-items: center; } +.gps-row input { flex: 1; margin-bottom: 0; } +.gps-row button { white-space: nowrap; margin-bottom: 0; } + +/* Two-column form */ +.form-row { display: grid; grid-template-columns: 1fr 1fr; gap: 1.5rem; } +@media (max-width: 480px) { .form-row { 
grid-template-columns: 1fr; } } + +/* Image preview */ +.image-preview { display: flex; flex-wrap: wrap; gap: .5rem; margin-bottom: .8rem; } +.image-preview img, .thumb { width: 80px; height: 80px; object-fit: cover; border: 1px solid var(--pico-muted-border-color); } +.thumb { width: 100px; height: 100px; display: block; } + +/* Journal entry cards */ +.entry-card { + border-left: 3px solid var(--pico-primary); + padding: .6rem 1rem; + margin-bottom: 1rem; + background: var(--pico-card-background-color); + border-radius: 0 var(--pico-border-radius) var(--pico-border-radius) 0; +} +.entry-meta { font-size: .8rem; margin-bottom: .3rem; } +.entry-title { font-size: 1rem; margin-bottom: .3rem; } +.entry-desc { white-space: pre-wrap; font-size: .9rem; } +.entry-images { display: flex; flex-wrap: wrap; gap: .5rem; margin-top: .5rem; } + +/* Login */ +.login-box { max-width: 360px; margin: 4rem auto; } diff --git a/backend/internal/api/templates/base.html b/backend/internal/api/templates/base.html new file mode 100644 index 0000000..13aab4a --- /dev/null +++ b/backend/internal/api/templates/base.html @@ -0,0 +1,15 @@ +{{define "base"}} + + + + + {{block "title" .}}Reisejournal{{end}} + + + + +{{block "content" .}}{{end}} +{{block "scripts" .}}{{end}} + + +{{end}} diff --git a/backend/internal/api/templates/day.html b/backend/internal/api/templates/day.html new file mode 100644 index 0000000..f2db36b --- /dev/null +++ b/backend/internal/api/templates/day.html @@ -0,0 +1,104 @@ +{{define "title"}}{{.Date}} — Reisejournal{{end}} + +{{define "content"}} +
+ +

{{.Date}}

+ +

Neuer Eintrag

+
+ +
+
+ + +
+
+ +
+ + + +
+ +
+
+ + + + + + +
+ +
+ +

Einträge ({{len .Entries}})

+ {{range .Entries}} +
+ + {{if .Title}}
{{.Title}}
{{end}} + {{if .Description}}
{{.Description}}
{{end}} + {{if .Images}} +
+ {{range .Images}} + + {{.OriginalName}} + + {{end}} +
+ {{end}} +
+ {{else}} +

// Noch keine Einträge

+ {{end}} + +

Trackpunkte ({{len .Points}})

+
+ + + + {{range .Points}} + + + + + + + + {{else}} + + {{end}} + +
ZeitLatLonQuelleNotiz
{{.Timestamp.Format "15:04:05"}}{{printf "%.5f" .Lat}}{{printf "%.5f" .Lon}}{{.Source}}{{.Note}}
// Keine Punkte
+
+ +

Aufenthalte ({{len .Stops}})

+
+ + + + {{range .Stops}} + + + + + + + {{else}} + + {{end}} + +
VonBisDauerOrt
{{.StartTS.Format "15:04"}}{{.EndTS.Format "15:04"}}{{divInt .DurationS 60}} min{{if .PlaceLabel}}{{.PlaceLabel}}{{else}}{{end}}
// Keine Aufenthalte
+
+
+{{end}} + +{{define "scripts"}} + +{{end}} + +{{template "base" .}} diff --git a/backend/internal/api/templates/days.html b/backend/internal/api/templates/days.html new file mode 100644 index 0000000..6f517d5 --- /dev/null +++ b/backend/internal/api/templates/days.html @@ -0,0 +1,46 @@ +{{define "title"}}Tage — Reisejournal{{end}} + +{{define "content"}} +
+ +
+
+ + +
+
+ +

Reisetage

+
+ + + + {{range .Days}} + + + + + + + {{else}} + + {{end}} + +
DatumPunkteVonBis
{{.Date}}{{.Count}}{{if .FirstTS}}{{.FirstTS.Format "15:04"}}{{end}}{{if .LastTS}}{{.LastTS.Format "15:04"}}{{end}}
// Keine Daten vorhanden
+
+
+{{end}} + +{{define "scripts"}} + +{{end}} + +{{template "base" .}} diff --git a/backend/internal/api/templates/login.html b/backend/internal/api/templates/login.html new file mode 100644 index 0000000..bc904c7 --- /dev/null +++ b/backend/internal/api/templates/login.html @@ -0,0 +1,21 @@ +{{define "title"}}Login — Reisejournal{{end}} + +{{define "content"}} +
+ +
+{{end}} + +{{template "base" .}} diff --git a/backend/internal/api/webapp/index.html b/backend/internal/api/webapp/index.html new file mode 100644 index 0000000..2abd53e --- /dev/null +++ b/backend/internal/api/webapp/index.html @@ -0,0 +1 @@ + diff --git a/backend/internal/api/webui.go b/backend/internal/api/webui.go new file mode 100644 index 0000000..a883d4c --- /dev/null +++ b/backend/internal/api/webui.go @@ -0,0 +1,188 @@ +package api + +import ( + "bytes" + "embed" + "errors" + "html/template" + "io/fs" + "log/slog" + "net/http" + "strings" + "time" + + "github.com/go-chi/chi/v5" + "github.com/jacek/pamietnik/backend/internal/auth" + "github.com/jacek/pamietnik/backend/internal/db" +) + +//go:embed static templates +var assets embed.FS + +var funcMap = template.FuncMap{ + "divInt": func(a, b int) int { return a / b }, + "deref": func(p *float64) float64 { + if p == nil { + return 0 + } + return *p + }, +} + +var tmpls = template.Must( + template.New("").Funcs(funcMap).ParseFS(assets, "templates/*.html"), +) + +func staticFS() fs.FS { + sub, err := fs.Sub(assets, "static") + if err != nil { + panic(err) + } + return sub +} + +// WebUI groups all web UI handlers. +type WebUI struct { + authStore *auth.Store + tpStore *db.TrackpointStore + stopStore *db.StopStore + journalStore *db.JournalStore +} + +func NewWebUI(a *auth.Store, tp *db.TrackpointStore, st *db.StopStore, j *db.JournalStore) *WebUI { + return &WebUI{authStore: a, tpStore: tp, stopStore: st, journalStore: j} +} + +func render(w http.ResponseWriter, page string, data any) { + // Each page defines its own blocks; "base" assembles the full document. + // We must clone and re-associate per request because ParseFS loads all + // templates into one set — ExecuteTemplate("base") picks up the blocks + // defined by the last parsed file otherwise. 
+ t, err := tmpls.Clone() + if err == nil { + _, err = t.ParseFS(assets, "templates/"+page) + } + if err != nil { + slog.Error("template parse", "page", page, "err", err) + http.Error(w, "Template-Fehler", http.StatusInternalServerError) + return + } + // Render into buffer first so we can still send a proper error status + // if execution fails — once we write to w the status code is committed. + var buf bytes.Buffer + if err := t.ExecuteTemplate(&buf, "base", data); err != nil { + slog.Error("template execute", "page", page, "err", err) + http.Error(w, "Template-Fehler", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + _, _ = buf.WriteTo(w) +} + +func (ui *WebUI) HandleGetLogin(w http.ResponseWriter, r *http.Request) { + render(w, "login.html", map[string]any{"Error": "", "Username": ""}) +} + +func (ui *WebUI) HandlePostLogin(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "Ungültige Formulardaten", http.StatusBadRequest) + return + } + username := strings.TrimSpace(r.FormValue("username")) + password := r.FormValue("password") + + sess, err := ui.authStore.Login(r.Context(), username, password) + if err != nil { + msg := "Interner Fehler." + if errors.Is(err, auth.ErrInvalidCredentials) { + msg = "Ungültige Zugangsdaten." 
+ } + render(w, "login.html", map[string]any{"Error": msg, "Username": username}) + return + } + + http.SetCookie(w, &http.Cookie{ + Name: sessionCookieName, + Value: sess.SessionID, + Path: "/", + HttpOnly: true, + Secure: true, + SameSite: http.SameSiteLaxMode, + Expires: sess.ExpiresAt, + }) + http.Redirect(w, r, "/days", http.StatusSeeOther) +} + +func (ui *WebUI) HandleLogout(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie(sessionCookieName) + if err == nil { + ui.authStore.Logout(r.Context(), cookie.Value) + } + http.SetCookie(w, &http.Cookie{ + Name: sessionCookieName, + Value: "", + Path: "/", + MaxAge: -1, + Expires: time.Unix(0, 0), + }) + http.Redirect(w, r, "/login", http.StatusSeeOther) +} + +func (ui *WebUI) HandleDaysRedirect(w http.ResponseWriter, r *http.Request) { + date := strings.TrimSpace(r.URL.Query().Get("date")) + if date == "" { + http.Redirect(w, r, "/days", http.StatusSeeOther) + return + } + if _, err := time.Parse("2006-01-02", date); err != nil { + http.Redirect(w, r, "/days", http.StatusSeeOther) + return + } + http.Redirect(w, r, "/days/"+date, http.StatusSeeOther) +} + +func (ui *WebUI) HandleDaysList(w http.ResponseWriter, r *http.Request) { + userID := userIDFromContext(r.Context()) + now := time.Now().UTC() + from := now.AddDate(-20, 0, 0).Format("2006-01-02") + to := now.AddDate(0, 0, 1).Format("2006-01-02") + days, err := ui.tpStore.ListDays(r.Context(), userID, from, to) + if err != nil { + http.Error(w, "Fehler beim Laden", http.StatusInternalServerError) + return + } + render(w, "days.html", map[string]any{"Days": days}) +} + +func (ui *WebUI) HandleDayDetail(w http.ResponseWriter, r *http.Request) { + userID := userIDFromContext(r.Context()) + + date := chi.URLParam(r, "date") + if date == "" { + http.Error(w, "Datum fehlt", http.StatusBadRequest) + return + } + + points, err := ui.tpStore.ListByDate(r.Context(), userID, date) + if err != nil { + http.Error(w, "Fehler beim Laden", 
http.StatusInternalServerError) + return + } + stops, err := ui.stopStore.ListByDate(r.Context(), userID, date) + if err != nil { + http.Error(w, "Fehler beim Laden", http.StatusInternalServerError) + return + } + entries, err := ui.journalStore.ListByDate(r.Context(), userID, date) + if err != nil { + http.Error(w, "Fehler beim Laden", http.StatusInternalServerError) + return + } + + render(w, "day.html", map[string]any{ + "Date": date, + "Points": points, + "Stops": stops, + "Entries": entries, + }) +} diff --git a/backend/internal/auth/auth.go b/backend/internal/auth/auth.go new file mode 100644 index 0000000..e262459 --- /dev/null +++ b/backend/internal/auth/auth.go @@ -0,0 +1,137 @@ +package auth + +import ( + "context" + "crypto/rand" + "crypto/subtle" + "encoding/hex" + "errors" + "fmt" + "strings" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "golang.org/x/crypto/argon2" + + "github.com/jacek/pamietnik/backend/internal/domain" +) + +const sessionDuration = 24 * time.Hour + +var ErrInvalidCredentials = errors.New("invalid username or password") +var ErrSessionNotFound = errors.New("session not found or expired") + +type Store struct { + pool *pgxpool.Pool +} + +func NewStore(pool *pgxpool.Pool) *Store { + return &Store{pool: pool} +} + +// HashPassword returns an argon2id hash of the password. +func HashPassword(password string) (string, error) { + salt := make([]byte, 16) + if _, err := rand.Read(salt); err != nil { + return "", fmt.Errorf("generate salt: %w", err) + } + hash := argon2.IDKey([]byte(password), salt, 1, 64*1024, 4, 32) + return fmt.Sprintf("$argon2id$%x$%x", salt, hash), nil +} + +// VerifyPassword checks password against stored hash. 
+// Format: $argon2id$<salt_hex>$<hash_hex>
+func VerifyPassword(password, stored string) bool {
+	parts := strings.Split(stored, "$")
+	// parts: ["", "argon2id", "<salt_hex>", "<hash_hex>"]
+	if len(parts) != 4 || parts[1] != "argon2id" {
+		return false
+	}
+	salt, err := hex.DecodeString(parts[2])
+	if err != nil {
+		return false
+	}
+	expected, err := hex.DecodeString(parts[3])
+	if err != nil {
+		return false
+	}
+	hash := argon2.IDKey([]byte(password), salt, 1, 64*1024, 4, 32)
+	return subtle.ConstantTimeCompare(hash, expected) == 1
+}
+
+// Login verifies credentials and creates a session.
+func (s *Store) Login(ctx context.Context, username, password string) (domain.Session, error) {
+	var user domain.User
+	err := s.pool.QueryRow(ctx,
+		`SELECT user_id, username, password_hash FROM users WHERE username = $1`,
+		username,
+	).Scan(&user.UserID, &user.Username, &user.PasswordHash)
+	if err != nil {
+		if errors.Is(err, pgx.ErrNoRows) {
+			return domain.Session{}, ErrInvalidCredentials
+		}
+		return domain.Session{}, err
+	}
+
+	if !VerifyPassword(password, user.PasswordHash) {
+		return domain.Session{}, ErrInvalidCredentials
+	}
+
+	sessionID, err := newSessionID()
+	if err != nil {
+		return domain.Session{}, fmt.Errorf("create session: %w", err)
+	}
+	now := time.Now().UTC()
+	sess := domain.Session{
+		SessionID: sessionID,
+		UserID:    user.UserID,
+		CreatedAt: now,
+		ExpiresAt: now.Add(sessionDuration),
+	}
+
+	_, err = s.pool.Exec(ctx,
+		`INSERT INTO sessions (session_id, user_id, created_at, expires_at)
+		 VALUES ($1, $2, $3, $4)`,
+		sess.SessionID, sess.UserID, sess.CreatedAt, sess.ExpiresAt,
+	)
+	if err != nil {
+		return domain.Session{}, err
+	}
+	return sess, nil
+}
+
+// GetSession validates a session and returns the stored session (including user_id).
+func (s *Store) GetSession(ctx context.Context, sessionID string) (domain.Session, error) { + var sess domain.Session + err := s.pool.QueryRow(ctx, + `SELECT session_id, user_id, created_at, expires_at + FROM sessions + WHERE session_id = $1 AND expires_at > NOW()`, + sessionID, + ).Scan(&sess.SessionID, &sess.UserID, &sess.CreatedAt, &sess.ExpiresAt) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return domain.Session{}, ErrSessionNotFound + } + return domain.Session{}, err + } + return sess, nil +} + +// Logout deletes a session. +func (s *Store) Logout(ctx context.Context, sessionID string) error { + _, err := s.pool.Exec(ctx, `DELETE FROM sessions WHERE session_id = $1`, sessionID) + if err != nil { + return fmt.Errorf("delete session: %w", err) + } + return nil +} + +func newSessionID() (string, error) { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("generate session id: %w", err) + } + return hex.EncodeToString(b), nil +} diff --git a/backend/internal/db/db.go b/backend/internal/db/db.go new file mode 100644 index 0000000..c2362d2 --- /dev/null +++ b/backend/internal/db/db.go @@ -0,0 +1,41 @@ +package db + +import ( + "context" + _ "embed" + "fmt" + "time" + + "github.com/jackc/pgx/v5/pgxpool" +) + +//go:embed schema.sql +var schema string + +func NewPool(ctx context.Context, dsn string) (*pgxpool.Pool, error) { + cfg, err := pgxpool.ParseConfig(dsn) + if err != nil { + return nil, fmt.Errorf("parse dsn: %w", err) + } + cfg.MaxConns = 25 + cfg.MinConns = 2 + cfg.MaxConnLifetime = 15 * time.Minute + cfg.MaxConnIdleTime = 5 * time.Minute + + pool, err := pgxpool.NewWithConfig(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("create pool: %w", err) + } + if err := pool.Ping(ctx); err != nil { + return nil, fmt.Errorf("ping db: %w", err) + } + return pool, nil +} + +// InitSchema applies the embedded schema.sql (idempotent via IF NOT EXISTS). 
+func InitSchema(ctx context.Context, pool *pgxpool.Pool) error { + if _, err := pool.Exec(ctx, schema); err != nil { + return fmt.Errorf("init schema: %w", err) + } + return nil +} diff --git a/backend/internal/db/journal.go b/backend/internal/db/journal.go new file mode 100644 index 0000000..f400b00 --- /dev/null +++ b/backend/internal/db/journal.go @@ -0,0 +1,109 @@ +package db + +import ( + "context" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/jacek/pamietnik/backend/internal/domain" +) + +type JournalStore struct { + pool *pgxpool.Pool +} + +func NewJournalStore(pool *pgxpool.Pool) *JournalStore { + return &JournalStore{pool: pool} +} + +// InsertEntry creates a new journal entry and returns it with the generated entry_id. +func (s *JournalStore) InsertEntry(ctx context.Context, e domain.JournalEntry) (domain.JournalEntry, error) { + err := s.pool.QueryRow(ctx, + `INSERT INTO journal_entries (user_id, entry_date, entry_time, title, description, lat, lon) + VALUES ($1, $2, $3, $4, $5, $6, $7) + RETURNING entry_id, created_at`, + e.UserID, e.EntryDate, e.EntryTime, e.Title, e.Description, e.Lat, e.Lon, + ).Scan(&e.EntryID, &e.CreatedAt) + return e, err +} + +// InsertImage attaches an image record to an entry. +func (s *JournalStore) InsertImage(ctx context.Context, img domain.JournalImage) (domain.JournalImage, error) { + err := s.pool.QueryRow(ctx, + `INSERT INTO journal_images (entry_id, filename, original_name, mime_type, size_bytes) + VALUES ($1, $2, $3, $4, $5) + RETURNING image_id, created_at`, + img.EntryID, img.Filename, img.OriginalName, img.MimeType, img.SizeBytes, + ).Scan(&img.ImageID, &img.CreatedAt) + return img, err +} + +// ListByDate returns all journal entries for a given date (YYYY-MM-DD), including their images. 
+func (s *JournalStore) ListByDate(ctx context.Context, userID, date string) ([]domain.JournalEntry, error) { + rows, err := s.pool.Query(ctx, + `SELECT entry_id, user_id, entry_date::text, entry_time::text, title, description, lat, lon, created_at + FROM journal_entries + WHERE user_id = $1 AND entry_date = $2 + ORDER BY entry_time`, + userID, date, + ) + if err != nil { + return nil, err + } + defer rows.Close() + + var entries []domain.JournalEntry + for rows.Next() { + var e domain.JournalEntry + if err := rows.Scan( + &e.EntryID, &e.UserID, &e.EntryDate, &e.EntryTime, + &e.Title, &e.Description, &e.Lat, &e.Lon, &e.CreatedAt, + ); err != nil { + return nil, err + } + entries = append(entries, e) + } + if err := rows.Err(); err != nil { + return nil, err + } + + if len(entries) == 0 { + return entries, nil + } + + // Load all images in a single query to avoid N+1 + entryIDs := make([]string, len(entries)) + for i, e := range entries { + entryIDs[i] = e.EntryID + } + + imgRows, err := s.pool.Query(ctx, + `SELECT image_id, entry_id, filename, original_name, mime_type, size_bytes, created_at + FROM journal_images WHERE entry_id = ANY($1) ORDER BY created_at`, + entryIDs, + ) + if err != nil { + return nil, err + } + defer imgRows.Close() + + imgMap := make(map[string][]domain.JournalImage) + for imgRows.Next() { + var img domain.JournalImage + if err := imgRows.Scan( + &img.ImageID, &img.EntryID, &img.Filename, &img.OriginalName, + &img.MimeType, &img.SizeBytes, &img.CreatedAt, + ); err != nil { + return nil, err + } + imgMap[img.EntryID] = append(imgMap[img.EntryID], img) + } + if err := imgRows.Err(); err != nil { + return nil, err + } + + for i, e := range entries { + entries[i].Images = imgMap[e.EntryID] + } + return entries, nil +} diff --git a/backend/internal/db/schema.sql b/backend/internal/db/schema.sql new file mode 100644 index 0000000..d42f4b0 --- /dev/null +++ b/backend/internal/db/schema.sql @@ -0,0 +1,94 @@ +-- Pamietnik database schema +-- Applied 
automatically at server startup via CREATE TABLE IF NOT EXISTS. + +CREATE TABLE IF NOT EXISTS users ( + user_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text, + username TEXT NOT NULL UNIQUE, + password_hash TEXT NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS sessions ( + session_id TEXT PRIMARY KEY, + user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + expires_at TIMESTAMPTZ NOT NULL +); +CREATE INDEX IF NOT EXISTS sessions_expires_at_idx ON sessions(expires_at); + +CREATE TABLE IF NOT EXISTS devices ( + device_id TEXT PRIMARY KEY, + user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS trackpoints ( + id BIGSERIAL PRIMARY KEY, + event_id TEXT NOT NULL, + device_id TEXT NOT NULL, + trip_id TEXT NOT NULL DEFAULT '', + ts TIMESTAMPTZ NOT NULL, + lat DOUBLE PRECISION NOT NULL, + lon DOUBLE PRECISION NOT NULL, + source TEXT NOT NULL DEFAULT 'gps', + note TEXT NOT NULL DEFAULT '', + accuracy_m DOUBLE PRECISION, + speed_mps DOUBLE PRECISION, + bearing_deg DOUBLE PRECISION, + altitude_m DOUBLE PRECISION, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + CONSTRAINT trackpoints_device_event_uniq UNIQUE (device_id, event_id) +); +CREATE INDEX IF NOT EXISTS trackpoints_device_ts_idx ON trackpoints(device_id, ts); +CREATE INDEX IF NOT EXISTS trackpoints_ts_idx ON trackpoints(ts); + +CREATE TABLE IF NOT EXISTS stops ( + stop_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text, + device_id TEXT NOT NULL, + trip_id TEXT NOT NULL DEFAULT '', + start_ts TIMESTAMPTZ NOT NULL, + end_ts TIMESTAMPTZ NOT NULL, + center_lat DOUBLE PRECISION NOT NULL, + center_lon DOUBLE PRECISION NOT NULL, + duration_s INT NOT NULL, + place_label TEXT, + place_details JSONB, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS stops_device_start_ts_idx ON 
stops(device_id, start_ts); + +CREATE TABLE IF NOT EXISTS suggestions ( + suggestion_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text, + stop_id TEXT NOT NULL REFERENCES stops(stop_id) ON DELETE CASCADE, + type TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + text TEXT NOT NULL DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + dismissed_at TIMESTAMPTZ +); +CREATE INDEX IF NOT EXISTS suggestions_stop_id_idx ON suggestions(stop_id); + +CREATE TABLE IF NOT EXISTS journal_entries ( + entry_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text, + user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE, + entry_date DATE NOT NULL, + entry_time TIME NOT NULL, + title TEXT NOT NULL DEFAULT '', + description TEXT NOT NULL DEFAULT '', + lat DOUBLE PRECISION, + lon DOUBLE PRECISION, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS journal_entries_user_date_idx ON journal_entries(user_id, entry_date); + +CREATE TABLE IF NOT EXISTS journal_images ( + image_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text, + entry_id TEXT NOT NULL REFERENCES journal_entries(entry_id) ON DELETE CASCADE, + filename TEXT NOT NULL, + original_name TEXT NOT NULL DEFAULT '', + mime_type TEXT NOT NULL DEFAULT '', + size_bytes BIGINT NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS journal_images_entry_id_idx ON journal_images(entry_id); diff --git a/backend/internal/db/stops.go b/backend/internal/db/stops.go new file mode 100644 index 0000000..0e2ed14 --- /dev/null +++ b/backend/internal/db/stops.go @@ -0,0 +1,60 @@ +package db + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/jacek/pamietnik/backend/internal/domain" +) + +type StopStore struct { + pool *pgxpool.Pool +} + +func NewStopStore(pool *pgxpool.Pool) *StopStore { + return &StopStore{pool: pool} +} + +func (s *StopStore) ListByDate(ctx context.Context, userID, date string) 
([]domain.Stop, error) { + rows, err := s.pool.Query(ctx, ` + SELECT st.stop_id, st.device_id, st.trip_id, + st.start_ts, st.end_ts, + st.center_lat, st.center_lon, st.duration_s, + COALESCE(st.place_label, ''), + st.place_details + FROM stops st + JOIN devices d ON d.device_id = st.device_id + WHERE d.user_id = $1 + AND DATE(st.start_ts AT TIME ZONE 'UTC') = $2::date + ORDER BY st.start_ts`, + userID, date, + ) + if err != nil { + return nil, err + } + defer rows.Close() + return pgx.CollectRows(rows, func(row pgx.CollectableRow) (domain.Stop, error) { + var st domain.Stop + err := row.Scan( + &st.StopID, &st.DeviceID, &st.TripID, + &st.StartTS, &st.EndTS, + &st.CenterLat, &st.CenterLon, &st.DurationS, + &st.PlaceLabel, &st.PlaceDetails, + ) + return st, err + }) +} + +func (s *StopStore) Insert(ctx context.Context, st domain.Stop) error { + _, err := s.pool.Exec(ctx, ` + INSERT INTO stops (stop_id, device_id, trip_id, start_ts, end_ts, + center_lat, center_lon, duration_s, place_label, place_details) + VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10) + ON CONFLICT (stop_id) DO NOTHING`, + st.StopID, st.DeviceID, st.TripID, st.StartTS, st.EndTS, + st.CenterLat, st.CenterLon, st.DurationS, st.PlaceLabel, st.PlaceDetails, + ) + return err +} diff --git a/backend/internal/db/suggestions.go b/backend/internal/db/suggestions.go new file mode 100644 index 0000000..94c9927 --- /dev/null +++ b/backend/internal/db/suggestions.go @@ -0,0 +1,54 @@ +package db + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/jacek/pamietnik/backend/internal/domain" +) + +type SuggestionStore struct { + pool *pgxpool.Pool +} + +func NewSuggestionStore(pool *pgxpool.Pool) *SuggestionStore { + return &SuggestionStore{pool: pool} +} + +func (s *SuggestionStore) ListByDate(ctx context.Context, userID, date string) ([]domain.Suggestion, error) { + rows, err := s.pool.Query(ctx, ` + SELECT sg.suggestion_id, sg.stop_id, sg.type, sg.title, sg.text, + 
sg.created_at, sg.dismissed_at + FROM suggestions sg + JOIN stops st ON st.stop_id = sg.stop_id + JOIN devices d ON d.device_id = st.device_id + WHERE d.user_id = $1 + AND DATE(st.start_ts AT TIME ZONE 'UTC') = $2::date + ORDER BY sg.created_at`, + userID, date, + ) + if err != nil { + return nil, err + } + defer rows.Close() + return pgx.CollectRows(rows, func(row pgx.CollectableRow) (domain.Suggestion, error) { + var sg domain.Suggestion + err := row.Scan( + &sg.SuggestionID, &sg.StopID, &sg.Type, &sg.Title, &sg.Text, + &sg.CreatedAt, &sg.DismissedAt, + ) + return sg, err + }) +} + +func (s *SuggestionStore) Insert(ctx context.Context, sg domain.Suggestion) error { + _, err := s.pool.Exec(ctx, ` + INSERT INTO suggestions (suggestion_id, stop_id, type, title, text, created_at) + VALUES ($1,$2,$3,$4,$5,$6) + ON CONFLICT (suggestion_id) DO NOTHING`, + sg.SuggestionID, sg.StopID, sg.Type, sg.Title, sg.Text, sg.CreatedAt, + ) + return err +} diff --git a/backend/internal/db/trackpoints.go b/backend/internal/db/trackpoints.go new file mode 100644 index 0000000..1951fce --- /dev/null +++ b/backend/internal/db/trackpoints.go @@ -0,0 +1,163 @@ +package db + +import ( + "context" + "errors" + "fmt" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/jacek/pamietnik/backend/internal/domain" +) + +type TrackpointStore struct { + pool *pgxpool.Pool +} + +func NewTrackpointStore(pool *pgxpool.Pool) *TrackpointStore { + return &TrackpointStore{pool: pool} +} + +// UpsertBatch inserts trackpoints, ignoring duplicates (idempotency via device_id + event_id). +// Returns accepted event_ids and rejected items with reason. 
+func (s *TrackpointStore) UpsertBatch(ctx context.Context, userID string, points []domain.Trackpoint) (accepted []string, rejected []RejectedItem, err error) { + // First pass: validate all points + var valid []domain.Trackpoint + for _, p := range points { + if vErr := validateTrackpoint(p); vErr != nil { + rejected = append(rejected, RejectedItem{ + EventID: p.EventID, + Code: "VALIDATION_ERROR", + Message: vErr.Error(), + }) + continue + } + valid = append(valid, p) + } + + if len(valid) == 0 { + return accepted, rejected, nil + } + + // Ensure devices in a single batch (deduplicated) + if userID != "" { + seen := make(map[string]bool) + batch := &pgx.Batch{} + for _, p := range valid { + if !seen[p.DeviceID] { + seen[p.DeviceID] = true + batch.Queue( + `INSERT INTO devices (device_id, user_id) VALUES ($1, $2) ON CONFLICT (device_id) DO NOTHING`, + p.DeviceID, userID, + ) + } + } + br := s.pool.SendBatch(ctx, batch) + if closeErr := br.Close(); closeErr != nil { + return accepted, rejected, fmt.Errorf("ensure devices: %w", closeErr) + } + } + + // Insert trackpoints + for _, p := range valid { + _, err := s.pool.Exec(ctx, ` + INSERT INTO trackpoints ( + event_id, device_id, trip_id, ts, + lat, lon, source, note, + accuracy_m, speed_mps, bearing_deg, altitude_m + ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12) + ON CONFLICT (device_id, event_id) DO NOTHING`, + p.EventID, p.DeviceID, p.TripID, p.Timestamp, + p.Lat, p.Lon, p.Source, p.Note, + p.AccuracyM, p.SpeedMps, p.BearingDeg, p.AltitudeM, + ) + if err != nil { + rejected = append(rejected, RejectedItem{ + EventID: p.EventID, + Code: "DB_ERROR", + Message: "database error", + }) + continue + } + accepted = append(accepted, p.EventID) + } + return accepted, rejected, nil +} + +type RejectedItem struct { + EventID string `json:"event_id"` + Code string `json:"code"` + Message string `json:"message"` +} + +func validateTrackpoint(p domain.Trackpoint) error { + if p.EventID == "" { + return errors.New("event_id 
is required") + } + if p.DeviceID == "" { + return errors.New("device_id is required") + } + if p.Lat < -90 || p.Lat > 90 { + return errors.New("lat out of range") + } + if p.Lon < -180 || p.Lon > 180 { + return errors.New("lon out of range") + } + if p.Source != "" && p.Source != "gps" && p.Source != "manual" { + return errors.New("source must be 'gps' or 'manual'") + } + return nil +} + +func (s *TrackpointStore) ListByDate(ctx context.Context, userID, date string) ([]domain.Trackpoint, error) { + rows, err := s.pool.Query(ctx, ` + SELECT tp.event_id, tp.device_id, tp.trip_id, tp.ts, + tp.lat, tp.lon, tp.source, tp.note, + tp.accuracy_m, tp.speed_mps, tp.bearing_deg, tp.altitude_m + FROM trackpoints tp + JOIN devices d ON d.device_id = tp.device_id + WHERE d.user_id = $1 + AND DATE(tp.ts AT TIME ZONE 'UTC') = $2::date + ORDER BY tp.ts`, + userID, date, + ) + if err != nil { + return nil, err + } + defer rows.Close() + return pgx.CollectRows(rows, func(row pgx.CollectableRow) (domain.Trackpoint, error) { + var p domain.Trackpoint + err := row.Scan( + &p.EventID, &p.DeviceID, &p.TripID, &p.Timestamp, + &p.Lat, &p.Lon, &p.Source, &p.Note, + &p.AccuracyM, &p.SpeedMps, &p.BearingDeg, &p.AltitudeM, + ) + return p, err + }) +} + +func (s *TrackpointStore) ListDays(ctx context.Context, userID, from, to string) ([]domain.DaySummary, error) { + rows, err := s.pool.Query(ctx, ` + SELECT DATE(tp.ts AT TIME ZONE 'UTC')::text AS date, + COUNT(*) AS cnt, + MIN(tp.ts), + MAX(tp.ts) + FROM trackpoints tp + JOIN devices d ON d.device_id = tp.device_id + WHERE d.user_id = $1 + AND DATE(tp.ts AT TIME ZONE 'UTC') BETWEEN $2::date AND $3::date + GROUP BY DATE(tp.ts AT TIME ZONE 'UTC') + ORDER BY date`, + userID, from, to, + ) + if err != nil { + return nil, err + } + defer rows.Close() + return pgx.CollectRows(rows, func(row pgx.CollectableRow) (domain.DaySummary, error) { + var d domain.DaySummary + err := row.Scan(&d.Date, &d.Count, &d.FirstTS, &d.LastTS) + return d, err + }) +} 
diff --git a/backend/internal/domain/models.go b/backend/internal/domain/models.go new file mode 100644 index 0000000..f2bcba5 --- /dev/null +++ b/backend/internal/domain/models.go @@ -0,0 +1,85 @@ +package domain + +import "time" + +type Trackpoint struct { + EventID string `json:"event_id"` + DeviceID string `json:"device_id"` + TripID string `json:"trip_id"` + Timestamp time.Time `json:"timestamp"` + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` + Source string `json:"source"` // "gps" | "manual" + Note string `json:"note,omitempty"` + AccuracyM *float64 `json:"accuracy_m,omitempty"` + SpeedMps *float64 `json:"speed_mps,omitempty"` + BearingDeg *float64 `json:"bearing_deg,omitempty"` + AltitudeM *float64 `json:"altitude_m,omitempty"` +} + +type Stop struct { + StopID string `json:"stop_id"` + DeviceID string `json:"device_id"` + TripID string `json:"trip_id"` + StartTS time.Time `json:"start_ts"` + EndTS time.Time `json:"end_ts"` + CenterLat float64 `json:"center_lat"` + CenterLon float64 `json:"center_lon"` + DurationS int `json:"duration_s"` + PlaceLabel string `json:"place_label,omitempty"` + PlaceDetails map[string]any `json:"place_details,omitempty"` +} + +type Suggestion struct { + SuggestionID string `json:"suggestion_id"` + StopID string `json:"stop_id"` + Type string `json:"type"` // "highlight" | "name_place" | "add_note" + Title string `json:"title"` + Text string `json:"text"` + CreatedAt time.Time `json:"created_at"` + DismissedAt *time.Time `json:"dismissed_at,omitempty"` +} + +type DaySummary struct { + Date string `json:"date"` + Count int `json:"count"` + FirstTS *time.Time `json:"first_ts,omitempty"` + LastTS *time.Time `json:"last_ts,omitempty"` +} + +type JournalEntry struct { + EntryID string `json:"entry_id"` + UserID string `json:"user_id"` + EntryDate string `json:"entry_date"` // YYYY-MM-DD + EntryTime string `json:"entry_time"` // HH:MM + Title string `json:"title"` + Description string `json:"description"` + Lat *float64 
`json:"lat,omitempty"` + Lon *float64 `json:"lon,omitempty"` + CreatedAt time.Time `json:"created_at"` + Images []JournalImage `json:"images,omitempty"` +} + +type JournalImage struct { + ImageID string `json:"image_id"` + EntryID string `json:"entry_id"` + Filename string `json:"filename"` + OriginalName string `json:"original_name"` + MimeType string `json:"mime_type"` + SizeBytes int64 `json:"size_bytes"` + CreatedAt time.Time `json:"created_at"` +} + +type User struct { + UserID string `json:"user_id"` + Username string `json:"username"` + PasswordHash string `json:"-"` + CreatedAt time.Time `json:"created_at"` +} + +type Session struct { + SessionID string `json:"session_id"` + UserID string `json:"user_id"` + CreatedAt time.Time `json:"created_at"` + ExpiresAt time.Time `json:"expires_at"` +} diff --git a/backend/start.sh b/backend/start.sh new file mode 100755 index 0000000..addddc4 --- /dev/null +++ b/backend/start.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -e + +export DATABASE_URL="${DATABASE_URL:-postgres://ralph:ralph@localhost:5432/ralph?sslmode=disable}" +export LISTEN_ADDR="${LISTEN_ADDR:-:8081}" + +echo "Starting RALPH backend on $LISTEN_ADDR ..." +exec go run ./cmd/server diff --git a/backend/stop.sh b/backend/stop.sh new file mode 100755 index 0000000..ca56908 --- /dev/null +++ b/backend/stop.sh @@ -0,0 +1,12 @@ +#!/bin/bash +PORT="${LISTEN_ADDR:-:8081}" +PORT="${PORT##*:}" + +PID=$(lsof -ti:$PORT) +if [ -z "$PID" ]; then + echo "Server is not running on port $PORT" + exit 0 +fi + +kill $PID +echo "Server stopped (port $PORT, pid $PID)"