Add sync redesign with offline fallback (M9)

- Migration 003: adds logged_at to sync_log for TTL pruning; migrates
  settings_history to UUID TEXT PK with updated_at column
- SyncStore: Prune() deletes rows older than 30d and writes a '_pruned'
  marker at the boundary version; Pull() calls Prune lazily and returns
  ErrSyncStale (410) when the client's since_version is behind the marker
- sync_handler.go: GET /api/sync/pull?since=N; POST /api/sync/push with
  last-updated_at-wins conflict resolution for entries, balance_adjustments,
  settings_history; closed_days/closed_weeks skipped (server-only mutations)
- router.go: passes entryStore, adjustmentStore, settingsStore to SyncHandler
- settings_store.go: UUID PK, updated_at column, Upsert() for push path
- settings_service.go: generates UUID on create, sets updated_at on update
- settings_handler.go: ID params changed from int64 to string
- domain.go: Settings.ID string, Settings.UpdatedAt added
- client.ts: all mutation methods catch TypeError (offline) and fall back
  to Dexie write + outbox enqueue; crypto.randomUUID() for offline creates;
  Settings.id type changed to string
- db.ts: Dexie v3 — settings_history key path changed to string UUID;
  upgrade handler clears table for repopulation via pull
- sync.ts: real pushOutbox to POST /api/sync/push; pullChanges uses GET
  with ?since=N; 410 triggers coldStart() + retry; coldStart() wipes all
  tables and resets last_version
- 4 new Go store tests covering normal pull, stale client, empty prune,
  client-ahead-of-marker; all tests pass (store + service, 19 Vitest)
This commit is contained in:
2026-04-30 22:50:33 +02:00
parent 3214f48a6f
commit d8366f5c25
15 changed files with 864 additions and 144 deletions

View File

@@ -0,0 +1,140 @@
package store_test
import (
"context"
"errors"
"testing"
"time"
"github.com/wotra/wotra/internal/domain"
"github.com/wotra/wotra/internal/store"
)
// mustSyncStore opens an in-memory SQLite database, registers its
// teardown with t.Cleanup, and returns a SyncStore bound to it.
// The test is aborted immediately if the database cannot be opened.
func mustSyncStore(t *testing.T) *store.SyncStore {
	t.Helper()
	conn, openErr := store.Open(":memory:")
	if openErr != nil {
		t.Fatal(openErr)
	}
	// Close the connection when the owning test (not this helper) finishes.
	t.Cleanup(func() { conn.Close() })
	return store.NewSyncStore(conn)
}
// TestSyncPullNormal logs two entries and verifies that a full pull
// (since=0) returns both with server_version 2, and that an incremental
// pull (since=1) returns only the second entry.
func TestSyncPullNormal(t *testing.T) {
	syncStore := mustSyncStore(t)
	ctx := context.Background()

	fixtures := []*domain.Entry{
		{ID: "e1", DayKey: "2026-04-01", UpdatedAt: time.Now().UnixMilli()},
		{ID: "e2", DayKey: "2026-04-02", UpdatedAt: time.Now().UnixMilli()},
	}
	for _, entry := range fixtures {
		if err := syncStore.LogEntry(ctx, entry); err != nil {
			t.Fatal(err)
		}
	}

	// Full pull from the beginning of the log.
	full, fullVer, err := syncStore.Pull(ctx, 0)
	if err != nil {
		t.Fatalf("Pull: %v", err)
	}
	if len(full) != 2 {
		t.Fatalf("expected 2 changes, got %d", len(full))
	}
	if fullVer != 2 {
		t.Fatalf("expected server_version=2, got %d", fullVer)
	}

	// Incremental pull: since=1 should return only e2.
	incr, incrVer, err := syncStore.Pull(ctx, 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(incr) != 1 || incr[0].EntityID != "e2" {
		t.Fatalf("expected [e2], got %+v", incr)
	}
	if incrVer != 2 {
		t.Fatalf("expected ver=2, got %d", incrVer)
	}
}
// TestSyncPruneStaleClient prunes the entire sync log and verifies that
// a client pulling from before the prune boundary receives ErrSyncStale,
// forcing it to perform a cold start.
func TestSyncPruneStaleClient(t *testing.T) {
	syncStore := mustSyncStore(t)
	ctx := context.Background()

	// Log two entries then prune all of them (zero TTL).
	for _, entry := range []*domain.Entry{
		{ID: "e1", DayKey: "2026-01-01", UpdatedAt: time.Now().UnixMilli()},
		{ID: "e2", DayKey: "2026-01-02", UpdatedAt: time.Now().UnixMilli()},
	} {
		if err := syncStore.LogEntry(ctx, entry); err != nil {
			t.Fatal(err)
		}
	}

	// Prune with -1ms TTL → cutoff is 1ms in the future, so all rows are pruned.
	if err := syncStore.Prune(ctx, -time.Millisecond); err != nil {
		t.Fatalf("Prune: %v", err)
	}

	// A stale client (since=0) should get ErrSyncStale.
	_, _, pullErr := syncStore.Pull(ctx, 0)
	if !errors.Is(pullErr, store.ErrSyncStale) {
		t.Fatalf("expected ErrSyncStale, got %v", pullErr)
	}
}
// TestSyncPruneNoRows verifies that pruning an empty sync log neither
// errors nor writes a stale marker: a subsequent pull from version 0
// succeeds with no changes and server_version 0.
func TestSyncPruneNoRows(t *testing.T) {
	syncStore := mustSyncStore(t)
	ctx := context.Background()

	// Prune on empty log is a no-op.
	if err := syncStore.Prune(ctx, 30*24*time.Hour); err != nil {
		t.Fatalf("Prune on empty log: %v", err)
	}

	got, version, err := syncStore.Pull(ctx, 0)
	if err != nil {
		t.Fatalf("Pull: %v", err)
	}
	if len(got) != 0 {
		t.Fatalf("expected 0 changes, got %d", len(got))
	}
	if version != 0 {
		t.Fatalf("expected ver=0, got %d", version)
	}
}
// TestSyncClientAheadOfMarker verifies that a client whose since_version
// is at or beyond the prune marker is NOT treated as stale: it receives
// only the changes logged after the prune, with no ErrSyncStale.
func TestSyncClientAheadOfMarker(t *testing.T) {
	syncStore := mustSyncStore(t)
	ctx := context.Background()

	// Log two entries, prune all, then log a third.
	for _, entry := range []*domain.Entry{
		{ID: "e1", DayKey: "2026-01-01", UpdatedAt: time.Now().UnixMilli()},
		{ID: "e2", DayKey: "2026-01-02", UpdatedAt: time.Now().UnixMilli()},
	} {
		if err := syncStore.LogEntry(ctx, entry); err != nil {
			t.Fatal(err)
		}
	}
	if err := syncStore.Prune(ctx, -time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// Marker is at version 2. Log a new entry → version 3.
	latest := &domain.Entry{ID: "e3", DayKey: "2026-04-01", UpdatedAt: time.Now().UnixMilli()}
	if err := syncStore.LogEntry(ctx, latest); err != nil {
		t.Fatal(err)
	}

	// A client with since=2 is past the marker — should get only e3.
	got, version, err := syncStore.Pull(ctx, 2)
	if err != nil {
		t.Fatalf("expected no error for up-to-date client, got %v", err)
	}
	if len(got) != 1 || got[0].EntityID != "e3" {
		t.Fatalf("expected [e3], got %+v", got)
	}
	if version != 3 {
		t.Fatalf("expected ver=3, got %d", version)
	}
}