Convert backend from submodule to regular directory
Some checks failed
Deploy to NAS / deploy (push) Failing after 4s

Remove submodule tracking; backend is now a plain directory in the repo.
Also update deploy workflow: remove --recurse-submodules.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Christoph K.
2026-04-07 16:59:50 +02:00
parent 0bb7758a2f
commit d0b0b4f8bd
35 changed files with 2271 additions and 8 deletions

41
backend/internal/db/db.go Normal file
View File

@@ -0,0 +1,41 @@
package db
import (
"context"
_ "embed"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// schema holds the contents of schema.sql, embedded at build time
// and applied by InitSchema at server startup.
//
//go:embed schema.sql
var schema string
// NewPool opens a pgx connection pool for the given DSN, verifies
// connectivity with a ping, and returns the ready-to-use pool.
// The caller owns the pool and must Close it when done.
func NewPool(ctx context.Context, dsn string) (*pgxpool.Pool, error) {
	cfg, err := pgxpool.ParseConfig(dsn)
	if err != nil {
		return nil, fmt.Errorf("parse dsn: %w", err)
	}
	// Fixed, conservative pool sizing; adjust here if the workload changes.
	cfg.MaxConns = 25
	cfg.MinConns = 2
	cfg.MaxConnLifetime = 15 * time.Minute
	cfg.MaxConnIdleTime = 5 * time.Minute
	pool, err := pgxpool.NewWithConfig(ctx, cfg)
	if err != nil {
		return nil, fmt.Errorf("create pool: %w", err)
	}
	if err := pool.Ping(ctx); err != nil {
		// Fix: release the pool's connections/goroutines when the
		// initial ping fails; previously the pool leaked here.
		pool.Close()
		return nil, fmt.Errorf("ping db: %w", err)
	}
	return pool, nil
}
// InitSchema applies the embedded schema.sql (idempotent via IF NOT EXISTS).
func InitSchema(ctx context.Context, pool *pgxpool.Pool) error {
	_, err := pool.Exec(ctx, schema)
	if err != nil {
		return fmt.Errorf("init schema: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,109 @@
package db
import (
"context"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/jacek/pamietnik/backend/internal/domain"
)
// JournalStore persists journal entries and their attached images
// (journal_entries / journal_images tables) on top of a shared pgx pool.
type JournalStore struct {
	pool *pgxpool.Pool // shared connection pool; not owned by the store
}

// NewJournalStore returns a JournalStore backed by the given pool.
func NewJournalStore(pool *pgxpool.Pool) *JournalStore {
	return &JournalStore{pool: pool}
}
// InsertEntry creates a new journal entry and returns it with the generated entry_id.
func (s *JournalStore) InsertEntry(ctx context.Context, e domain.JournalEntry) (domain.JournalEntry, error) {
	const q = `INSERT INTO journal_entries (user_id, entry_date, entry_time, title, description, lat, lon)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
		RETURNING entry_id, created_at`
	row := s.pool.QueryRow(ctx, q,
		e.UserID, e.EntryDate, e.EntryTime, e.Title, e.Description, e.Lat, e.Lon,
	)
	// The database fills in entry_id and created_at; copy them back onto e.
	if err := row.Scan(&e.EntryID, &e.CreatedAt); err != nil {
		return e, err
	}
	return e, nil
}
// InsertImage attaches an image record to an entry.
func (s *JournalStore) InsertImage(ctx context.Context, img domain.JournalImage) (domain.JournalImage, error) {
	const q = `INSERT INTO journal_images (entry_id, filename, original_name, mime_type, size_bytes)
		VALUES ($1, $2, $3, $4, $5)
		RETURNING image_id, created_at`
	row := s.pool.QueryRow(ctx, q,
		img.EntryID, img.Filename, img.OriginalName, img.MimeType, img.SizeBytes,
	)
	// image_id and created_at are generated server-side; copy them back.
	if err := row.Scan(&img.ImageID, &img.CreatedAt); err != nil {
		return img, err
	}
	return img, nil
}
// ListByDate returns all journal entries for a given date (YYYY-MM-DD),
// including their images. Entries are ordered by entry_time; images per
// entry are ordered by created_at.
func (s *JournalStore) ListByDate(ctx context.Context, userID, date string) ([]domain.JournalEntry, error) {
	rows, err := s.pool.Query(ctx,
		`SELECT entry_id, user_id, entry_date::text, entry_time::text, title, description, lat, lon, created_at
		FROM journal_entries
		WHERE user_id = $1 AND entry_date = $2
		ORDER BY entry_time`,
		userID, date,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var entries []domain.JournalEntry
	for rows.Next() {
		var e domain.JournalEntry
		if err := rows.Scan(
			&e.EntryID, &e.UserID, &e.EntryDate, &e.EntryTime,
			&e.Title, &e.Description, &e.Lat, &e.Lon, &e.CreatedAt,
		); err != nil {
			return nil, err
		}
		entries = append(entries, e)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	if len(entries) == 0 {
		return entries, nil
	}
	imgMap, err := s.imagesByEntry(ctx, entries)
	if err != nil {
		return nil, err
	}
	// Index-based range: avoids copying each entry struct per iteration.
	for i := range entries {
		entries[i].Images = imgMap[entries[i].EntryID]
	}
	return entries, nil
}

// imagesByEntry loads the images for all given entries in a single query
// (avoiding an N+1 query pattern) and groups them by entry_id.
func (s *JournalStore) imagesByEntry(ctx context.Context, entries []domain.JournalEntry) (map[string][]domain.JournalImage, error) {
	entryIDs := make([]string, len(entries))
	for i := range entries {
		entryIDs[i] = entries[i].EntryID
	}
	imgRows, err := s.pool.Query(ctx,
		`SELECT image_id, entry_id, filename, original_name, mime_type, size_bytes, created_at
		FROM journal_images WHERE entry_id = ANY($1) ORDER BY created_at`,
		entryIDs,
	)
	if err != nil {
		return nil, err
	}
	defer imgRows.Close()
	imgMap := make(map[string][]domain.JournalImage)
	for imgRows.Next() {
		var img domain.JournalImage
		if err := imgRows.Scan(
			&img.ImageID, &img.EntryID, &img.Filename, &img.OriginalName,
			&img.MimeType, &img.SizeBytes, &img.CreatedAt,
		); err != nil {
			return nil, err
		}
		imgMap[img.EntryID] = append(imgMap[img.EntryID], img)
	}
	if err := imgRows.Err(); err != nil {
		return nil, err
	}
	return imgMap, nil
}

View File

@@ -0,0 +1,94 @@
-- Pamietnik database schema
-- Applied automatically at server startup via CREATE TABLE IF NOT EXISTS.

-- Accounts. user_id is a text-cast UUID generated by Postgres.
CREATE TABLE IF NOT EXISTS users (
user_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Login sessions; removed when the owning user is deleted.
CREATE TABLE IF NOT EXISTS sessions (
session_id TEXT PRIMARY KEY,
user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
expires_at TIMESTAMPTZ NOT NULL
);
-- Supports expiry sweeps / lookups by expiration time.
CREATE INDEX IF NOT EXISTS sessions_expires_at_idx ON sessions(expires_at);

-- Devices that upload trackpoints, each owned by one user.
CREATE TABLE IF NOT EXISTS devices (
device_id TEXT PRIMARY KEY,
user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Raw location samples. (device_id, event_id) is unique so uploads are
-- idempotent (re-sent batches are deduplicated on conflict).
CREATE TABLE IF NOT EXISTS trackpoints (
id BIGSERIAL PRIMARY KEY,
event_id TEXT NOT NULL,
device_id TEXT NOT NULL,
trip_id TEXT NOT NULL DEFAULT '',
ts TIMESTAMPTZ NOT NULL,
lat DOUBLE PRECISION NOT NULL,
lon DOUBLE PRECISION NOT NULL,
source TEXT NOT NULL DEFAULT 'gps',
note TEXT NOT NULL DEFAULT '',
accuracy_m DOUBLE PRECISION,
speed_mps DOUBLE PRECISION,
bearing_deg DOUBLE PRECISION,
altitude_m DOUBLE PRECISION,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT trackpoints_device_event_uniq UNIQUE (device_id, event_id)
);
-- Per-device time-range queries (day/range listings).
CREATE INDEX IF NOT EXISTS trackpoints_device_ts_idx ON trackpoints(device_id, ts);
CREATE INDEX IF NOT EXISTS trackpoints_ts_idx ON trackpoints(ts);

-- Detected stationary periods, derived from trackpoints.
CREATE TABLE IF NOT EXISTS stops (
stop_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text,
device_id TEXT NOT NULL,
trip_id TEXT NOT NULL DEFAULT '',
start_ts TIMESTAMPTZ NOT NULL,
end_ts TIMESTAMPTZ NOT NULL,
center_lat DOUBLE PRECISION NOT NULL,
center_lon DOUBLE PRECISION NOT NULL,
duration_s INT NOT NULL,
place_label TEXT,
place_details JSONB,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS stops_device_start_ts_idx ON stops(device_id, start_ts);

-- Suggestions generated for a stop; cascade-deleted with the stop.
-- dismissed_at is NULL while the suggestion is still active.
CREATE TABLE IF NOT EXISTS suggestions (
suggestion_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text,
stop_id TEXT NOT NULL REFERENCES stops(stop_id) ON DELETE CASCADE,
type TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
text TEXT NOT NULL DEFAULT '',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
dismissed_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS suggestions_stop_id_idx ON suggestions(stop_id);

-- User-authored journal entries, keyed by user and date.
CREATE TABLE IF NOT EXISTS journal_entries (
entry_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text,
user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
entry_date DATE NOT NULL,
entry_time TIME NOT NULL,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
lat DOUBLE PRECISION,
lon DOUBLE PRECISION,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Supports the per-user, per-date entry listing.
CREATE INDEX IF NOT EXISTS journal_entries_user_date_idx ON journal_entries(user_id, entry_date);

-- Image attachments for journal entries; cascade-deleted with the entry.
CREATE TABLE IF NOT EXISTS journal_images (
image_id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text,
entry_id TEXT NOT NULL REFERENCES journal_entries(entry_id) ON DELETE CASCADE,
filename TEXT NOT NULL,
original_name TEXT NOT NULL DEFAULT '',
mime_type TEXT NOT NULL DEFAULT '',
size_bytes BIGINT NOT NULL DEFAULT 0,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS journal_images_entry_id_idx ON journal_images(entry_id);

View File

@@ -0,0 +1,60 @@
package db
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/jacek/pamietnik/backend/internal/domain"
)
// StopStore persists detected stationary periods (stops table)
// on top of a shared pgx pool.
type StopStore struct {
	pool *pgxpool.Pool // shared connection pool; not owned by the store
}

// NewStopStore returns a StopStore backed by the given pool.
func NewStopStore(pool *pgxpool.Pool) *StopStore {
	return &StopStore{pool: pool}
}
// ListByDate returns the stops recorded on the given UTC date (YYYY-MM-DD)
// for any device owned by the user, ordered by start time.
func (s *StopStore) ListByDate(ctx context.Context, userID, date string) ([]domain.Stop, error) {
	const q = `
		SELECT st.stop_id, st.device_id, st.trip_id,
		st.start_ts, st.end_ts,
		st.center_lat, st.center_lon, st.duration_s,
		COALESCE(st.place_label, ''),
		st.place_details
		FROM stops st
		JOIN devices d ON d.device_id = st.device_id
		WHERE d.user_id = $1
		AND DATE(st.start_ts AT TIME ZONE 'UTC') = $2::date
		ORDER BY st.start_ts`
	rows, err := s.pool.Query(ctx, q, userID, date)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	toStop := func(row pgx.CollectableRow) (domain.Stop, error) {
		var stop domain.Stop
		scanErr := row.Scan(
			&stop.StopID, &stop.DeviceID, &stop.TripID,
			&stop.StartTS, &stop.EndTS,
			&stop.CenterLat, &stop.CenterLon, &stop.DurationS,
			&stop.PlaceLabel, &stop.PlaceDetails,
		)
		return stop, scanErr
	}
	return pgx.CollectRows(rows, toStop)
}
// Insert stores a stop; an existing stop_id is left untouched (idempotent).
func (s *StopStore) Insert(ctx context.Context, st domain.Stop) error {
	const q = `
		INSERT INTO stops (stop_id, device_id, trip_id, start_ts, end_ts,
		center_lat, center_lon, duration_s, place_label, place_details)
		VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)
		ON CONFLICT (stop_id) DO NOTHING`
	_, execErr := s.pool.Exec(ctx, q,
		st.StopID, st.DeviceID, st.TripID, st.StartTS, st.EndTS,
		st.CenterLat, st.CenterLon, st.DurationS, st.PlaceLabel, st.PlaceDetails,
	)
	return execErr
}

View File

@@ -0,0 +1,54 @@
package db
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/jacek/pamietnik/backend/internal/domain"
)
// SuggestionStore persists stop-derived suggestions (suggestions table)
// on top of a shared pgx pool.
type SuggestionStore struct {
	pool *pgxpool.Pool // shared connection pool; not owned by the store
}

// NewSuggestionStore returns a SuggestionStore backed by the given pool.
func NewSuggestionStore(pool *pgxpool.Pool) *SuggestionStore {
	return &SuggestionStore{pool: pool}
}
// ListByDate returns the suggestions attached to stops that started on the
// given UTC date (YYYY-MM-DD) for any device owned by the user, ordered by
// suggestion creation time.
func (s *SuggestionStore) ListByDate(ctx context.Context, userID, date string) ([]domain.Suggestion, error) {
	const q = `
		SELECT sg.suggestion_id, sg.stop_id, sg.type, sg.title, sg.text,
		sg.created_at, sg.dismissed_at
		FROM suggestions sg
		JOIN stops st ON st.stop_id = sg.stop_id
		JOIN devices d ON d.device_id = st.device_id
		WHERE d.user_id = $1
		AND DATE(st.start_ts AT TIME ZONE 'UTC') = $2::date
		ORDER BY sg.created_at`
	rows, err := s.pool.Query(ctx, q, userID, date)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	toSuggestion := func(row pgx.CollectableRow) (domain.Suggestion, error) {
		var sug domain.Suggestion
		scanErr := row.Scan(
			&sug.SuggestionID, &sug.StopID, &sug.Type, &sug.Title, &sug.Text,
			&sug.CreatedAt, &sug.DismissedAt,
		)
		return sug, scanErr
	}
	return pgx.CollectRows(rows, toSuggestion)
}
// Insert stores a suggestion; an existing suggestion_id is left untouched (idempotent).
func (s *SuggestionStore) Insert(ctx context.Context, sg domain.Suggestion) error {
	const q = `
		INSERT INTO suggestions (suggestion_id, stop_id, type, title, text, created_at)
		VALUES ($1,$2,$3,$4,$5,$6)
		ON CONFLICT (suggestion_id) DO NOTHING`
	_, execErr := s.pool.Exec(ctx, q,
		sg.SuggestionID, sg.StopID, sg.Type, sg.Title, sg.Text, sg.CreatedAt,
	)
	return execErr
}

View File

@@ -0,0 +1,163 @@
package db
import (
"context"
"errors"
"fmt"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/jacek/pamietnik/backend/internal/domain"
)
// TrackpointStore persists raw location samples (trackpoints table)
// and the devices that produce them, on top of a shared pgx pool.
type TrackpointStore struct {
	pool *pgxpool.Pool // shared connection pool; not owned by the store
}

// NewTrackpointStore returns a TrackpointStore backed by the given pool.
func NewTrackpointStore(pool *pgxpool.Pool) *TrackpointStore {
	return &TrackpointStore{pool: pool}
}
// UpsertBatch inserts trackpoints, ignoring duplicates (idempotency via device_id + event_id).
// Invalid points and per-row database failures are reported in rejected rather than aborting
// the whole batch. Returns accepted event_ids and rejected items with reason.
func (s *TrackpointStore) UpsertBatch(ctx context.Context, userID string, points []domain.Trackpoint) (accepted []string, rejected []RejectedItem, err error) {
	// First pass: validate all points; invalid ones are rejected up front.
	valid := make([]domain.Trackpoint, 0, len(points))
	for _, p := range points {
		if vErr := validateTrackpoint(p); vErr != nil {
			rejected = append(rejected, RejectedItem{
				EventID: p.EventID,
				Code:    "VALIDATION_ERROR",
				Message: vErr.Error(),
			})
			continue
		}
		valid = append(valid, p)
	}
	if len(valid) == 0 {
		return accepted, rejected, nil
	}
	// Register devices before inserting points. Skipped when there is no
	// user context (a device-ensure failure here fails the whole call).
	if userID != "" {
		if devErr := s.ensureDevices(ctx, userID, valid); devErr != nil {
			return accepted, rejected, devErr
		}
	}
	// Insert trackpoints one by one; a failing row is rejected, the rest continue.
	for _, p := range valid {
		if insErr := s.insertPoint(ctx, p); insErr != nil {
			rejected = append(rejected, RejectedItem{
				EventID: p.EventID,
				Code:    "DB_ERROR",
				// Deliberately opaque: don't leak DB internals to clients.
				Message: "database error",
			})
			continue
		}
		accepted = append(accepted, p.EventID)
	}
	return accepted, rejected, nil
}

// ensureDevices upserts one devices row per distinct device_id among the
// given points, using a single pgx batch round trip.
func (s *TrackpointStore) ensureDevices(ctx context.Context, userID string, points []domain.Trackpoint) error {
	seen := make(map[string]bool, len(points))
	batch := &pgx.Batch{}
	for _, p := range points {
		if seen[p.DeviceID] {
			continue
		}
		seen[p.DeviceID] = true
		batch.Queue(
			`INSERT INTO devices (device_id, user_id) VALUES ($1, $2) ON CONFLICT (device_id) DO NOTHING`,
			p.DeviceID, userID,
		)
	}
	if closeErr := s.pool.SendBatch(ctx, batch).Close(); closeErr != nil {
		return fmt.Errorf("ensure devices: %w", closeErr)
	}
	return nil
}

// insertPoint inserts a single trackpoint; a duplicate (device_id, event_id)
// pair is silently ignored.
func (s *TrackpointStore) insertPoint(ctx context.Context, p domain.Trackpoint) error {
	_, err := s.pool.Exec(ctx, `
		INSERT INTO trackpoints (
		event_id, device_id, trip_id, ts,
		lat, lon, source, note,
		accuracy_m, speed_mps, bearing_deg, altitude_m
		) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12)
		ON CONFLICT (device_id, event_id) DO NOTHING`,
		p.EventID, p.DeviceID, p.TripID, p.Timestamp,
		p.Lat, p.Lon, p.Source, p.Note,
		p.AccuracyM, p.SpeedMps, p.BearingDeg, p.AltitudeM,
	)
	return err
}
// RejectedItem describes a single trackpoint that was not stored, with a
// machine-readable Code ("VALIDATION_ERROR" or "DB_ERROR") and a
// human-readable Message.
type RejectedItem struct {
	EventID string `json:"event_id"`
	Code    string `json:"code"`
	Message string `json:"message"`
}
// validateTrackpoint reports the first constraint violated by p,
// or nil when p is well-formed.
func validateTrackpoint(p domain.Trackpoint) error {
	switch {
	case p.EventID == "":
		return errors.New("event_id is required")
	case p.DeviceID == "":
		return errors.New("device_id is required")
	case p.Lat < -90 || p.Lat > 90:
		return errors.New("lat out of range")
	case p.Lon < -180 || p.Lon > 180:
		return errors.New("lon out of range")
	case p.Source != "" && p.Source != "gps" && p.Source != "manual":
		return errors.New("source must be 'gps' or 'manual'")
	}
	return nil
}
// ListByDate returns the trackpoints recorded on the given UTC date
// (YYYY-MM-DD) for any device owned by the user, ordered by timestamp.
func (s *TrackpointStore) ListByDate(ctx context.Context, userID, date string) ([]domain.Trackpoint, error) {
	const q = `
		SELECT tp.event_id, tp.device_id, tp.trip_id, tp.ts,
		tp.lat, tp.lon, tp.source, tp.note,
		tp.accuracy_m, tp.speed_mps, tp.bearing_deg, tp.altitude_m
		FROM trackpoints tp
		JOIN devices d ON d.device_id = tp.device_id
		WHERE d.user_id = $1
		AND DATE(tp.ts AT TIME ZONE 'UTC') = $2::date
		ORDER BY tp.ts`
	rows, err := s.pool.Query(ctx, q, userID, date)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	toPoint := func(row pgx.CollectableRow) (domain.Trackpoint, error) {
		var pt domain.Trackpoint
		scanErr := row.Scan(
			&pt.EventID, &pt.DeviceID, &pt.TripID, &pt.Timestamp,
			&pt.Lat, &pt.Lon, &pt.Source, &pt.Note,
			&pt.AccuracyM, &pt.SpeedMps, &pt.BearingDeg, &pt.AltitudeM,
		)
		return pt, scanErr
	}
	return pgx.CollectRows(rows, toPoint)
}
// ListDays summarizes trackpoint activity per UTC day in the inclusive
// range [from, to] (both YYYY-MM-DD): point count plus first and last
// timestamps for each day that has data.
func (s *TrackpointStore) ListDays(ctx context.Context, userID, from, to string) ([]domain.DaySummary, error) {
	const q = `
		SELECT DATE(tp.ts AT TIME ZONE 'UTC')::text AS date,
		COUNT(*) AS cnt,
		MIN(tp.ts),
		MAX(tp.ts)
		FROM trackpoints tp
		JOIN devices d ON d.device_id = tp.device_id
		WHERE d.user_id = $1
		AND DATE(tp.ts AT TIME ZONE 'UTC') BETWEEN $2::date AND $3::date
		GROUP BY DATE(tp.ts AT TIME ZONE 'UTC')
		ORDER BY date`
	rows, err := s.pool.Query(ctx, q, userID, from, to)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	toSummary := func(row pgx.CollectableRow) (domain.DaySummary, error) {
		var day domain.DaySummary
		scanErr := row.Scan(&day.Date, &day.Count, &day.FirstTS, &day.LastTS)
		return day, scanErr
	}
	return pgx.CollectRows(rows, toSummary)
}