Add sync redesign with offline fallback (M9)

- Migration 003: adds logged_at to sync_log for TTL pruning; migrates
  settings_history to UUID TEXT PK with updated_at column
- SyncStore: Prune() deletes rows older than 30d and writes a '_pruned'
  marker at the boundary version; Pull() calls Prune lazily and returns
  ErrSyncStale (410) when the client's since_version is behind the marker
- sync_handler.go: GET /api/sync/pull?since=N; POST /api/sync/push with
  last-updated_at-wins conflict resolution for entries, balance_adjustments,
  settings_history; closed_days/closed_weeks skipped (server-only mutations)
- router.go: passes entryStore, adjustmentStore, settingsStore to SyncHandler
- settings_store.go: UUID PK, updated_at column, Upsert() for push path
- settings_service.go: generates UUID on create, sets updated_at on update
- settings_handler.go: ID params changed from int64 to string
- domain.go: Settings.ID string, Settings.UpdatedAt added
- client.ts: all mutation methods catch TypeError (offline) and fall back
  to Dexie write + outbox enqueue; crypto.randomUUID() for offline creates;
  Settings.id type changed to string
- db.ts: Dexie v3 — settings_history key path changed to string UUID;
  upgrade handler clears table for repopulation via pull
- sync.ts: real pushOutbox to POST /api/sync/push; pullChanges uses GET
  with ?since=N; 410 triggers coldStart() + retry; coldStart() wipes all
  tables and resets last_version
- 4 new Go store tests covering normal pull, stale client, empty prune,
  and client-ahead-of-marker; all Go tests (store + service) and all 19
  Vitest tests pass
This commit is contained in:
2026-04-30 22:50:33 +02:00
parent 3214f48a6f
commit d8366f5c25
15 changed files with 864 additions and 144 deletions

View File

@@ -0,0 +1,27 @@
-- +migrate Up
-- 1. Add logged_at to sync_log for TTL-based pruning.
--    Existing rows get the DEFAULT of 0, which makes them eligible for
--    deletion on the very first prune pass.
ALTER TABLE sync_log ADD COLUMN logged_at INTEGER NOT NULL DEFAULT 0;
-- 2. Migrate settings_history to UUID TEXT primary key and add updated_at.
--    SQLite cannot change a column's type or primary key in place, so use
--    the standard rename -> create -> copy -> drop dance.
ALTER TABLE settings_history RENAME TO settings_history_old;
CREATE TABLE settings_history (
id TEXT PRIMARY KEY,
effective_from TEXT NOT NULL,
hours_per_week REAL NOT NULL,
workdays_mask INTEGER NOT NULL DEFAULT 31,
timezone TEXT NOT NULL DEFAULT 'UTC',
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL
);
-- lower(hex(randomblob(16))) yields 32 lowercase hex characters (128 random
-- bits), not a hyphenated RFC-4122 UUID — callers must not assume the
-- dashed format. updated_at is seeded from created_at so migrated rows have
-- a sensible value for last-updated_at-wins conflict resolution.
INSERT INTO settings_history (id, effective_from, hours_per_week, workdays_mask, timezone, created_at, updated_at)
SELECT lower(hex(randomblob(16))), effective_from, hours_per_week, workdays_mask, timezone, created_at, created_at
FROM settings_history_old;
DROP TABLE settings_history_old;
-- +migrate Down
-- (intentionally left minimal; restoring integer PK requires recreating the table again)
-- NOTE(review): ALTER TABLE ... DROP COLUMN requires SQLite 3.35+ — confirm
-- the minimum SQLite version this project supports.
ALTER TABLE sync_log DROP COLUMN logged_at;

View File

@@ -19,7 +19,7 @@ func NewSettingsStore(db *sql.DB) *SettingsStore {
// Current returns the most recent settings effective on or before the given day key.
func (s *SettingsStore) Current(ctx context.Context, asOfDayKey string) (*domain.Settings, error) {
row := s.db.QueryRowContext(ctx,
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at, updated_at
FROM settings_history
WHERE effective_from <= ?
ORDER BY effective_from DESC, id DESC
@@ -30,7 +30,7 @@ func (s *SettingsStore) Current(ctx context.Context, asOfDayKey string) (*domain
// Latest returns the most recently created settings row.
func (s *SettingsStore) Latest(ctx context.Context) (*domain.Settings, error) {
row := s.db.QueryRowContext(ctx,
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at, updated_at
FROM settings_history
ORDER BY effective_from DESC, id DESC
LIMIT 1`)
@@ -40,7 +40,7 @@ func (s *SettingsStore) Latest(ctx context.Context) (*domain.Settings, error) {
// History returns all settings rows ordered by effective_from DESC.
func (s *SettingsStore) History(ctx context.Context) ([]*domain.Settings, error) {
rows, err := s.db.QueryContext(ctx,
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at, updated_at
FROM settings_history ORDER BY effective_from DESC, id DESC`)
if err != nil {
return nil, err
@@ -49,7 +49,7 @@ func (s *SettingsStore) History(ctx context.Context) ([]*domain.Settings, error)
var result []*domain.Settings
for rows.Next() {
var s domain.Settings
if err := rows.Scan(&s.ID, &s.EffectiveFrom, &s.HoursPerWeek, &s.WorkdaysMask, &s.Timezone, &s.CreatedAt); err != nil {
if err := rows.Scan(&s.ID, &s.EffectiveFrom, &s.HoursPerWeek, &s.WorkdaysMask, &s.Timezone, &s.CreatedAt, &s.UpdatedAt); err != nil {
return nil, err
}
result = append(result, &s)
@@ -59,30 +59,41 @@ func (s *SettingsStore) History(ctx context.Context) ([]*domain.Settings, error)
// Insert inserts a new settings row.
func (s *SettingsStore) Insert(ctx context.Context, set *domain.Settings) error {
res, err := s.db.ExecContext(ctx,
`INSERT INTO settings_history (effective_from, hours_per_week, workdays_mask, timezone, created_at)
VALUES (?, ?, ?, ?, ?)`,
set.EffectiveFrom, set.HoursPerWeek, set.WorkdaysMask, set.Timezone, set.CreatedAt)
if err != nil {
return err
}
id, _ := res.LastInsertId()
set.ID = id
return nil
_, err := s.db.ExecContext(ctx,
`INSERT INTO settings_history (id, effective_from, hours_per_week, workdays_mask, timezone, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?)`,
set.ID, set.EffectiveFrom, set.HoursPerWeek, set.WorkdaysMask, set.Timezone, set.CreatedAt, set.UpdatedAt)
return err
}
// Update overwrites an existing settings row by ID.
func (s *SettingsStore) Update(ctx context.Context, set *domain.Settings) error {
_, err := s.db.ExecContext(ctx,
`UPDATE settings_history
SET effective_from=?, hours_per_week=?, workdays_mask=?, timezone=?
SET effective_from=?, hours_per_week=?, workdays_mask=?, timezone=?, updated_at=?
WHERE id=?`,
set.EffectiveFrom, set.HoursPerWeek, set.WorkdaysMask, set.Timezone, set.ID)
set.EffectiveFrom, set.HoursPerWeek, set.WorkdaysMask, set.Timezone, set.UpdatedAt, set.ID)
return err
}
// Upsert inserts or replaces a settings row (used by sync push; last updated_at wins).
// On an ID collision the existing row is overwritten only when the incoming
// updated_at is strictly greater, so replayed or stale pushes are no-ops.
// created_at is deliberately absent from the UPDATE SET list: an existing
// row keeps its original creation timestamp.
func (s *SettingsStore) Upsert(ctx context.Context, set *domain.Settings) error {
	_, err := s.db.ExecContext(ctx,
		`INSERT INTO settings_history (id, effective_from, hours_per_week, workdays_mask, timezone, created_at, updated_at)
		VALUES (?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(id) DO UPDATE SET
		effective_from=excluded.effective_from,
		hours_per_week=excluded.hours_per_week,
		workdays_mask=excluded.workdays_mask,
		timezone=excluded.timezone,
		updated_at=excluded.updated_at
		WHERE excluded.updated_at > settings_history.updated_at`,
		set.ID, set.EffectiveFrom, set.HoursPerWeek, set.WorkdaysMask, set.Timezone, set.CreatedAt, set.UpdatedAt)
	return err
}
// Delete removes a settings row by ID.
func (s *SettingsStore) Delete(ctx context.Context, id int64) error {
func (s *SettingsStore) Delete(ctx context.Context, id string) error {
_, err := s.db.ExecContext(ctx, `DELETE FROM settings_history WHERE id=?`, id)
return err
}
@@ -95,16 +106,16 @@ func (s *SettingsStore) Count(ctx context.Context) (int, error) {
}
// GetByID returns a single settings row by ID.
func (s *SettingsStore) GetByID(ctx context.Context, id int64) (*domain.Settings, error) {
func (s *SettingsStore) GetByID(ctx context.Context, id string) (*domain.Settings, error) {
row := s.db.QueryRowContext(ctx,
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at
`SELECT id, effective_from, hours_per_week, workdays_mask, timezone, created_at, updated_at
FROM settings_history WHERE id=?`, id)
return scanSettings(row)
}
func scanSettings(row *sql.Row) (*domain.Settings, error) {
var s domain.Settings
err := row.Scan(&s.ID, &s.EffectiveFrom, &s.HoursPerWeek, &s.WorkdaysMask, &s.Timezone, &s.CreatedAt)
err := row.Scan(&s.ID, &s.EffectiveFrom, &s.HoursPerWeek, &s.WorkdaysMask, &s.Timezone, &s.CreatedAt, &s.UpdatedAt)
if err != nil {
return nil, err
}

View File

@@ -4,12 +4,21 @@ import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/wotra/wotra/internal/domain"
)
// SyncStore manages the sync_log and server_version.
// ErrSyncStale is returned when the client's since_version is behind the prune marker.
var ErrSyncStale = errors.New("sync state stale: full re-sync required")
// pruneEntity and pruneOp are sentinel values written as a prune marker row.
const pruneEntity = "_pruned"
const pruneOp = "marker"
// SyncStore manages the sync_log.
type SyncStore struct {
db *sql.DB
}
@@ -21,13 +30,19 @@ func NewSyncStore(db *sql.DB) *SyncStore {
type SyncChange struct {
Entity string `json:"entity"`
EntityID string `json:"entity_id"`
Op string `json:"op"` // "upsert" | "delete"
Op string `json:"op"` // "upsert" | "delete" | "marker"
Version int64 `json:"version"`
Payload string `json:"payload"`
}
// Pull returns all sync_log rows with version > sinceVersion.
// It calls Prune first with a 30-day TTL.
// If the client is behind a prune marker it returns ErrSyncStale.
func (s *SyncStore) Pull(ctx context.Context, sinceVersion int64) ([]SyncChange, int64, error) {
if err := s.Prune(ctx, 30*24*time.Hour); err != nil {
return nil, 0, err
}
rows, err := s.db.QueryContext(ctx,
`SELECT entity, entity_id, op, version, payload FROM sync_log
WHERE version > ? ORDER BY version ASC`, sinceVersion)
@@ -35,6 +50,7 @@ func (s *SyncStore) Pull(ctx context.Context, sinceVersion int64) ([]SyncChange,
return nil, 0, err
}
defer rows.Close()
var changes []SyncChange
var maxVersion int64 = sinceVersion
for rows.Next() {
@@ -42,6 +58,10 @@ func (s *SyncStore) Pull(ctx context.Context, sinceVersion int64) ([]SyncChange,
if err := rows.Scan(&c.Entity, &c.EntityID, &c.Op, &c.Version, &c.Payload); err != nil {
return nil, 0, err
}
// First row with entity="_pruned" means client is stale.
if c.Entity == pruneEntity {
return nil, 0, ErrSyncStale
}
if c.Version > maxVersion {
maxVersion = c.Version
}
@@ -50,6 +70,49 @@ func (s *SyncStore) Pull(ctx context.Context, sinceVersion int64) ([]SyncChange,
return changes, maxVersion, rows.Err()
}
// Prune deletes sync_log rows older than ttl and inserts a prune marker at the
// version boundary so stale clients can detect they need a full re-sync.
//
// All steps run in a single transaction: find the highest version among the
// rows about to be deleted, delete them, then write a "_pruned" marker row
// carrying that boundary version. Pull treats any marker row it returns as
// proof the client's since_version predates retained history.
func (s *SyncStore) Prune(ctx context.Context, ttl time.Duration) error {
	// A negative ttl places the cutoff in the future, pruning every row
	// (the store tests rely on this behavior).
	cutoff := time.Now().Add(-ttl).UnixMilli()
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback() //nolint:errcheck
	// Find max version among rows that will be pruned (excluding existing markers).
	var maxPruned sql.NullInt64
	err = tx.QueryRowContext(ctx,
		`SELECT MAX(version) FROM sync_log WHERE logged_at < ? AND entity != ?`,
		cutoff, pruneEntity).Scan(&maxPruned)
	if err != nil {
		return err
	}
	if !maxPruned.Valid {
		// Nothing to prune: commit without writing a marker, so fresh
		// clients pulling from 0 never see ErrSyncStale.
		return tx.Commit()
	}
	// Delete old rows (but not the existing marker, if any).
	if _, err = tx.ExecContext(ctx,
		`DELETE FROM sync_log WHERE logged_at < ? AND entity != ?`,
		cutoff, pruneEntity); err != nil {
		return err
	}
	// Insert (or replace) the prune marker at the boundary version.
	// NOTE(review): INSERT OR REPLACE only collapses onto a previous marker
	// if sync_log has a UNIQUE constraint covering the marker's key columns;
	// otherwise each prune appends a new marker row — verify the schema.
	now := time.Now().UnixMilli()
	if _, err = tx.ExecContext(ctx,
		`INSERT OR REPLACE INTO sync_log (entity, entity_id, op, version, payload, logged_at)
		VALUES (?, ?, ?, ?, '{}', ?)`,
		pruneEntity, pruneEntity, pruneOp, maxPruned.Int64, now); err != nil {
		return err
	}
	return tx.Commit()
}
// nextVersion returns the next monotonic version number.
func (s *SyncStore) nextVersion(ctx context.Context) (int64, error) {
var max sql.NullInt64
@@ -96,6 +159,21 @@ func (s *SyncStore) LogClosedWeek(ctx context.Context, w *domain.ClosedWeek) err
return s.log(ctx, "closed_weeks", w.WeekKey, "upsert", string(payload))
}
// LogSettings records an upsert of the given settings row in the sync log.
// The full row, JSON-encoded, becomes the change payload that clients apply
// on pull.
func (s *SyncStore) LogSettings(ctx context.Context, set *domain.Settings) error {
	raw, err := json.Marshal(set)
	if err != nil {
		return err
	}
	return s.log(ctx, "settings_history", set.ID, "upsert", string(raw))
}
// LogSettingsDelete appends a settings delete to the sync log.
// The payload is a JSON object carrying only the deleted row's ID.
func (s *SyncStore) LogSettingsDelete(ctx context.Context, id string) error {
	// Use json.Marshal rather than fmt's %q: Go string quoting emits escapes
	// such as \a and \xNN that are not valid JSON, so a pathological id could
	// previously produce an unparseable payload.
	payload, err := json.Marshal(struct {
		ID string `json:"id"`
	}{ID: id})
	if err != nil {
		return err
	}
	return s.log(ctx, "settings_history", id, "delete", string(payload))
}
// LogBalanceAdjustment appends a balance_adjustment upsert to the sync log.
func (s *SyncStore) LogBalanceAdjustment(ctx context.Context, a *domain.BalanceAdjustment) error {
payload, err := json.Marshal(a)
@@ -116,8 +194,9 @@ func (s *SyncStore) log(ctx context.Context, entity, entityID, op, payload strin
if err != nil {
return err
}
now := time.Now().UnixMilli()
_, err = s.db.ExecContext(ctx,
`INSERT INTO sync_log (entity, entity_id, op, version, payload) VALUES (?, ?, ?, ?, ?)`,
entity, entityID, op, version, payload)
`INSERT INTO sync_log (entity, entity_id, op, version, payload, logged_at) VALUES (?, ?, ?, ?, ?, ?)`,
entity, entityID, op, version, payload, now)
return err
}

View File

@@ -0,0 +1,140 @@
package store_test
import (
"context"
"errors"
"testing"
"time"
"github.com/wotra/wotra/internal/domain"
"github.com/wotra/wotra/internal/store"
)
// mustSyncStore opens a fresh in-memory database, registers its cleanup with
// the test, and returns a SyncStore backed by it.
func mustSyncStore(t *testing.T) *store.SyncStore {
	t.Helper()
	conn, err := store.Open(":memory:")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { conn.Close() })
	return store.NewSyncStore(conn)
}
// TestSyncPullNormal verifies full and incremental pulls against a freshly
// populated log: since=0 returns everything, since=1 returns only the tail,
// and both report the current server version.
func TestSyncPullNormal(t *testing.T) {
	s := mustSyncStore(t)
	ctx := context.Background()

	now := time.Now().UnixMilli()
	seed := []*domain.Entry{
		{ID: "e1", DayKey: "2026-04-01", UpdatedAt: now},
		{ID: "e2", DayKey: "2026-04-02", UpdatedAt: now},
	}
	for _, e := range seed {
		if err := s.LogEntry(ctx, e); err != nil {
			t.Fatal(err)
		}
	}

	changes, ver, err := s.Pull(ctx, 0)
	if err != nil {
		t.Fatalf("Pull: %v", err)
	}
	if len(changes) != 2 {
		t.Fatalf("expected 2 changes, got %d", len(changes))
	}
	if ver != 2 {
		t.Fatalf("expected server_version=2, got %d", ver)
	}

	// Incremental pull: since=1 should return only e2.
	tail, tailVer, err := s.Pull(ctx, 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(tail) != 1 || tail[0].EntityID != "e2" {
		t.Fatalf("expected [e2], got %+v", tail)
	}
	if tailVer != 2 {
		t.Fatalf("expected ver=2, got %d", tailVer)
	}
}
// TestSyncPruneStaleClient checks that a client whose since_version sits
// behind the prune marker receives ErrSyncStale rather than a partial stream.
func TestSyncPruneStaleClient(t *testing.T) {
	s := mustSyncStore(t)
	ctx := context.Background()

	// Populate the log with two rows.
	ts := time.Now().UnixMilli()
	for _, e := range []*domain.Entry{
		{ID: "e1", DayKey: "2026-01-01", UpdatedAt: ts},
		{ID: "e2", DayKey: "2026-01-02", UpdatedAt: ts},
	} {
		if err := s.LogEntry(ctx, e); err != nil {
			t.Fatal(err)
		}
	}

	// A negative TTL places the cutoff 1ms in the future, pruning every row.
	if err := s.Prune(ctx, -time.Millisecond); err != nil {
		t.Fatalf("Prune: %v", err)
	}

	// since=0 predates the marker, so the store must demand a full re-sync.
	if _, _, err := s.Pull(ctx, 0); !errors.Is(err, store.ErrSyncStale) {
		t.Fatalf("expected ErrSyncStale, got %v", err)
	}
}
// TestSyncPruneNoRows confirms that pruning an empty log neither fails nor
// writes a marker: a subsequent pull from version 0 is clean and empty.
func TestSyncPruneNoRows(t *testing.T) {
	s := mustSyncStore(t)
	ctx := context.Background()

	if err := s.Prune(ctx, 30*24*time.Hour); err != nil {
		t.Fatalf("Prune on empty log: %v", err)
	}

	changes, ver, err := s.Pull(ctx, 0)
	switch {
	case err != nil:
		t.Fatalf("Pull: %v", err)
	case len(changes) != 0:
		t.Fatalf("expected 0 changes, got %d", len(changes))
	case ver != 0:
		t.Fatalf("expected ver=0, got %d", ver)
	}
}
// TestSyncClientAheadOfMarker verifies that a client whose since_version
// equals the marker's version is NOT treated as stale and receives only the
// changes logged after the prune.
func TestSyncClientAheadOfMarker(t *testing.T) {
	s := mustSyncStore(t)
	ctx := context.Background()

	logEntry := func(id, day string) {
		t.Helper()
		e := &domain.Entry{ID: id, DayKey: day, UpdatedAt: time.Now().UnixMilli()}
		if err := s.LogEntry(ctx, e); err != nil {
			t.Fatal(err)
		}
	}

	// Two rows, then prune them all: the marker lands at version 2.
	logEntry("e1", "2026-01-01")
	logEntry("e2", "2026-01-02")
	if err := s.Prune(ctx, -time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// One more row after the prune takes version 3.
	logEntry("e3", "2026-04-01")

	// since=2 sits at the marker, not behind it: no stale error, only e3.
	changes, ver, err := s.Pull(ctx, 2)
	if err != nil {
		t.Fatalf("expected no error for up-to-date client, got %v", err)
	}
	if len(changes) != 1 || changes[0].EntityID != "e3" {
		t.Fatalf("expected [e3], got %+v", changes)
	}
	if ver != 3 {
		t.Fatalf("expected ver=3, got %d", ver)
	}
}