Add public feed, admin area, self-registration, visibility & hashtags
Some checks failed
Deploy to NAS / deploy (push) Failing after 26s
Some checks failed
Deploy to NAS / deploy (push) Failing after 26s
- Public feed (/) with infinite scroll via Intersection Observer
- Self-registration (/register)
- Admin area (/admin/entries, /admin/users) with user management
- journal_entries: visibility (public/private) + hashtags fields
- users: is_admin flag
- DB schema updated (recreate DB to apply)
- CI: run go test via docker run (golang:1.25-alpine) — fixes 'go not found'

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -3,6 +3,7 @@ package db
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
|
||||
"github.com/jacek/pamietnik/backend/internal/domain"
|
||||
@@ -18,11 +19,17 @@ func NewJournalStore(pool *pgxpool.Pool) *JournalStore {
|
||||
|
||||
// InsertEntry creates a new journal entry and returns it with the generated entry_id.
|
||||
func (s *JournalStore) InsertEntry(ctx context.Context, e domain.JournalEntry) (domain.JournalEntry, error) {
|
||||
if e.Visibility == "" {
|
||||
e.Visibility = "private"
|
||||
}
|
||||
if e.Hashtags == nil {
|
||||
e.Hashtags = []string{}
|
||||
}
|
||||
err := s.pool.QueryRow(ctx,
|
||||
`INSERT INTO journal_entries (user_id, entry_date, entry_time, title, description, lat, lon)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||
`INSERT INTO journal_entries (user_id, entry_date, entry_time, title, description, lat, lon, visibility, hashtags)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
||||
RETURNING entry_id, created_at`,
|
||||
e.UserID, e.EntryDate, e.EntryTime, e.Title, e.Description, e.Lat, e.Lon,
|
||||
e.UserID, e.EntryDate, e.EntryTime, e.Title, e.Description, e.Lat, e.Lon, e.Visibility, e.Hashtags,
|
||||
).Scan(&e.EntryID, &e.CreatedAt)
|
||||
return e, err
|
||||
}
|
||||
@@ -41,7 +48,7 @@ func (s *JournalStore) InsertImage(ctx context.Context, img domain.JournalImage)
|
||||
// ListByDate returns all journal entries for a given date (YYYY-MM-DD), including their images.
|
||||
func (s *JournalStore) ListByDate(ctx context.Context, userID, date string) ([]domain.JournalEntry, error) {
|
||||
rows, err := s.pool.Query(ctx,
|
||||
`SELECT entry_id, user_id, entry_date::text, entry_time::text, title, description, lat, lon, created_at
|
||||
`SELECT entry_id, user_id, entry_date::text, entry_time::text, title, description, lat, lon, visibility, hashtags, created_at
|
||||
FROM journal_entries
|
||||
WHERE user_id = $1 AND entry_date = $2
|
||||
ORDER BY entry_time`,
|
||||
@@ -51,32 +58,78 @@ func (s *JournalStore) ListByDate(ctx context.Context, userID, date string) ([]d
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
entries, err := collectEntries(rows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.attachImages(ctx, entries)
|
||||
}
|
||||
|
||||
// ListPublic returns public journal entries ordered by created_at DESC, for infinite scroll.
|
||||
func (s *JournalStore) ListPublic(ctx context.Context, limit, offset int) ([]domain.JournalEntry, error) {
|
||||
rows, err := s.pool.Query(ctx,
|
||||
`SELECT entry_id, user_id, entry_date::text, entry_time::text, title, description, lat, lon, visibility, hashtags, created_at
|
||||
FROM journal_entries
|
||||
WHERE visibility = 'public'
|
||||
ORDER BY created_at DESC
|
||||
LIMIT $1 OFFSET $2`,
|
||||
limit, offset,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
entries, err := collectEntries(rows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.attachImages(ctx, entries)
|
||||
}
|
||||
|
||||
// ListByUser returns all entries for a user, ordered by entry_date DESC, entry_time DESC.
|
||||
func (s *JournalStore) ListByUser(ctx context.Context, userID string) ([]domain.JournalEntry, error) {
|
||||
rows, err := s.pool.Query(ctx,
|
||||
`SELECT entry_id, user_id, entry_date::text, entry_time::text, title, description, lat, lon, visibility, hashtags, created_at
|
||||
FROM journal_entries
|
||||
WHERE user_id = $1
|
||||
ORDER BY entry_date DESC, entry_time DESC`,
|
||||
userID,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
entries, err := collectEntries(rows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.attachImages(ctx, entries)
|
||||
}
|
||||
|
||||
func collectEntries(rows pgx.Rows) ([]domain.JournalEntry, error) {
|
||||
var entries []domain.JournalEntry
|
||||
for rows.Next() {
|
||||
var e domain.JournalEntry
|
||||
if err := rows.Scan(
|
||||
&e.EntryID, &e.UserID, &e.EntryDate, &e.EntryTime,
|
||||
&e.Title, &e.Description, &e.Lat, &e.Lon, &e.CreatedAt,
|
||||
&e.Title, &e.Description, &e.Lat, &e.Lon, &e.Visibility, &e.Hashtags, &e.CreatedAt,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, e)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return entries, rows.Err()
|
||||
}
|
||||
|
||||
// attachImages loads images for the given entries in a single query and populates .Images.
|
||||
func (s *JournalStore) attachImages(ctx context.Context, entries []domain.JournalEntry) ([]domain.JournalEntry, error) {
|
||||
if len(entries) == 0 {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Load all images in a single query to avoid N+1
|
||||
entryIDs := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
entryIDs[i] = e.EntryID
|
||||
}
|
||||
|
||||
imgRows, err := s.pool.Query(ctx,
|
||||
`SELECT image_id, entry_id, filename, original_name, mime_type, size_bytes, created_at
|
||||
FROM journal_images WHERE entry_id = ANY($1) ORDER BY created_at`,
|
||||
@@ -101,7 +154,6 @@ func (s *JournalStore) ListByDate(ctx context.Context, userID, date string) ([]d
|
||||
if err := imgRows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, e := range entries {
|
||||
entries[i].Images = imgMap[e.EntryID]
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user