diff --git a/.changeset/dark-items-dig.md b/.changeset/dark-items-dig.md new file mode 100644 index 000000000..d11921c5e --- /dev/null +++ b/.changeset/dark-items-dig.md @@ -0,0 +1,5 @@ +--- +"@tanstack/powersync-db-collection": minor +--- + +Initial Release diff --git a/.changeset/in-memory-fallback-for-ssr.md b/.changeset/in-memory-fallback-for-ssr.md deleted file mode 100644 index 5afb61018..000000000 --- a/.changeset/in-memory-fallback-for-ssr.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -"@tanstack/db": patch ---- - -Add in-memory fallback for localStorage collections in SSR environments - -Prevents errors when localStorage collections are imported on the server by automatically falling back to an in-memory store. This allows isomorphic JavaScript applications to safely import localStorage collection modules without errors during module initialization. - -When localStorage is not available (e.g., in server-side rendering environments), the collection automatically uses an in-memory storage implementation. Data will not persist across page reloads or be shared across tabs when using the in-memory fallback, but the collection will function normally otherwise. - -Fixes #691 diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 472b8ec28..5ff230848 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -54,12 +54,14 @@ jobs: repo-token: "${{ secrets.GITHUB_TOKEN }}" pattern: "./packages/db/dist/**/*.{js,mjs}" comment-key: "db-package-size" + build-script: "build:minified" - name: Compressed Size Action - React DB Package uses: preactjs/compressed-size-action@v2 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" pattern: "./packages/react-db/dist/**/*.{js,mjs}" comment-key: "react-db-package-size" + build-script: "build:minified" build-example: name: Build Example Site runs-on: ubuntu-latest diff --git a/SERIALIZED_TRANSACTION_PLAN.md b/SERIALIZED_TRANSACTION_PLAN.md new file mode 100644 index 000000000..1736c2afd --- /dev/null +++ b/SERIALIZED_TRANSACTION_PLAN.md @@ -0,0 +1,365 @@ +# Implementation Plan for `useSerializedTransaction` with TanStack Pacer + +Based on [GitHub issue #35](https://github.com/TanStack/db/issues/35), using @tanstack/pacer for strategy implementation across all 5 framework integrations. + +## Overview + +Create a framework-agnostic core in `@tanstack/db` that manages optimistic transactions with pluggable queuing strategies powered by TanStack Pacer. Each framework package wraps the core with framework-specific reactive primitives. + +## Architecture Pattern + +The core transaction logic stays in one place (`@tanstack/db`) while each framework provides its own wrapper using framework-specific reactive primitives. + +```typescript +// Core in @tanstack/db (framework-agnostic) +createSerializedTransaction(config) // Returns { mutate, cleanup } + +// React wrapper +useSerializedTransaction(config) // Uses React hooks, returns mutate function + +// Solid wrapper +useSerializedTransaction(config) // Uses Solid signals, matches useLiveQuery pattern + +// Svelte/Vue wrappers +useSerializedTransaction(config) // Framework-specific implementations + +// Angular wrapper +injectSerializedTransaction(config) // Uses Angular DI, follows injectLiveQuery pattern +``` + +## Available Strategies (Based on Pacer Utilities) + +### 1. **debounceStrategy({ wait, leading?, trailing? })** + +- Uses Pacer's `Debouncer` class +- Waits for pause in activity before committing +- **Best for:** Search inputs, auto-save fields + +### 2. 
**queueStrategy({ wait?, maxSize?, addItemsTo?, getItemsFrom? })** + +- Uses Pacer's `Queuer` class +- Processes all transactions in order (FIFO/LIFO) +- FIFO: `{ addItemsTo: 'back', getItemsFrom: 'front' }` +- LIFO: `{ addItemsTo: 'back', getItemsFrom: 'back' }` +- **Best for:** Sequential operations that must all complete + +### 3. **throttleStrategy({ wait, leading?, trailing? })** + +- Uses Pacer's `Throttler` class +- Evenly spaces transaction executions over time +- **Best for:** Sliders, scroll handlers, progress bars + +### 4. **batchStrategy({ maxSize?, wait?, getShouldExecute? })** + +- Uses Pacer's `Batcher` class +- Groups multiple mutations into batches +- Triggers on size or time threshold +- **Best for:** Bulk operations, reducing network calls + +## File Structure + +``` +packages/db/src/ + ├── serialized-transaction.ts # Core framework-agnostic logic + └── strategies/ + ├── index.ts # Export all strategies + ├── debounceStrategy.ts # Wraps Pacer Debouncer + ├── queueStrategy.ts # Wraps Pacer Queuer + ├── throttleStrategy.ts # Wraps Pacer Throttler + ├── batchStrategy.ts # Wraps Pacer Batcher + └── types.ts # Strategy type definitions + +packages/db/package.json # Add @tanstack/pacer dependency + +packages/react-db/src/ + └── useSerializedTransaction.ts # React hook wrapper + +packages/solid-db/src/ + └── useSerializedTransaction.ts # Solid wrapper (matches useLiveQuery pattern) + +packages/svelte-db/src/ + └── useSerializedTransaction.svelte.ts # Svelte wrapper + +packages/vue-db/src/ + └── useSerializedTransaction.ts # Vue wrapper + +packages/angular-db/src/ + └── injectSerializedTransaction.ts # Angular wrapper (DI pattern) + +packages/*/tests/ + └── serialized-transaction.test.ts # Tests per package +``` + +## Core API Design + +```typescript +// Framework-agnostic core (packages/db) +import { debounceStrategy } from '@tanstack/db' + +const { mutate, cleanup } = createSerializedTransaction({ + mutationFn: async ({ transaction }) => { + await api.save(transaction.mutations) + }, + strategy: debounceStrategy({ wait: 500 }), + metadata?: Record, +}) + +// mutate() executes mutations according to strategy and returns Transaction +const transaction = mutate(() => { + collection.update(id, draft => { draft.value = newValue }) +}) + +// Await persistence and handle errors +try { + await transaction.isPersisted.promise + console.log('Transaction committed successfully') +} catch (error) { + console.error('Transaction failed:', error) +} + +// cleanup() when done (frameworks handle this automatically) +cleanup() +``` + +## React Hook Wrapper + +```typescript +// packages/react-db +import { debounceStrategy } from "@tanstack/react-db" + +const mutate = useSerializedTransaction({ + mutationFn: async ({ transaction }) => { + await api.save(transaction.mutations) + }, + strategy: debounceStrategy({ wait: 1000 }), +}) + +// Usage in component +const handleChange = async (value) => { + const tx = mutate(() => { + collection.update(id, (draft) => { + draft.value = value + }) + }) + + // Optional: await persistence or handle errors + try { + await tx.isPersisted.promise + } catch (error) { + console.error("Update failed:", error) + } +} +``` + +## Example: Slider with Different Strategies + +```typescript +// Debounce - wait for user to stop moving slider +const mutate = useSerializedTransaction({ + mutationFn: async ({ transaction }) => { + await api.updateVolume(transaction.mutations) + }, + strategy: debounceStrategy({ wait: 500 }), +}) + +// Throttle - update every 200ms while 
sliding +const mutate = useSerializedTransaction({ + mutationFn: async ({ transaction }) => { + await api.updateVolume(transaction.mutations) + }, + strategy: throttleStrategy({ wait: 200 }), +}) + +// Debounce with leading/trailing - save first + final value only +const mutate = useSerializedTransaction({ + mutationFn: async ({ transaction }) => { + await api.updateVolume(transaction.mutations) + }, + strategy: debounceStrategy({ wait: 0, leading: true, trailing: true }), +}) + +// Queue - save every change in order (FIFO) +const mutate = useSerializedTransaction({ + mutationFn: async ({ transaction }) => { + await api.updateVolume(transaction.mutations) + }, + strategy: queueStrategy({ + wait: 200, + addItemsTo: "back", + getItemsFrom: "front", + }), +}) +``` + +## Implementation Steps + +### Phase 1: Core Package (@tanstack/db) + +1. Add `@tanstack/pacer` dependency to packages/db/package.json +2. Create strategy type definitions in strategies/types.ts +3. Implement strategy factories: + - `debounceStrategy.ts` - wraps Pacer Debouncer + - `queueStrategy.ts` - wraps Pacer Queuer + - `throttleStrategy.ts` - wraps Pacer Throttler + - `batchStrategy.ts` - wraps Pacer Batcher +4. Create core `createSerializedTransaction()` function +5. Export strategies + core function from packages/db/src/index.ts + +### Phase 2: Framework Wrappers + +6. **React** - Create `useSerializedTransaction` using useRef/useEffect/useCallback +7. **Solid** - Create `useSerializedTransaction` using createSignal/onCleanup (matches `useLiveQuery` pattern) +8. **Svelte** - Create `useSerializedTransaction` using Svelte stores +9. **Vue** - Create `useSerializedTransaction` using ref/onUnmounted +10. **Angular** - Create `injectSerializedTransaction` using inject/DestroyRef (matches `injectLiveQuery` pattern) + +### Phase 3: Testing & Documentation + +11. Write tests for core logic in packages/db +12. Write tests for each framework wrapper +13. Update README with examples +14. Add TypeScript examples to docs + +## Strategy Type System + +```typescript +export type Strategy = + | DebounceStrategy + | QueueStrategy + | ThrottleStrategy + | BatchStrategy + +interface BaseStrategy { + _type: TName // Discriminator for type narrowing + execute: (fn: () => void) => void | Promise + cleanup: () => void +} + +export function debounceStrategy(opts: { + wait: number + leading?: boolean + trailing?: boolean +}): DebounceStrategy + +export function queueStrategy(opts?: { + wait?: number + maxSize?: number + addItemsTo?: "front" | "back" + getItemsFrom?: "front" | "back" +}): QueueStrategy + +export function throttleStrategy(opts: { + wait: number + leading?: boolean + trailing?: boolean +}): ThrottleStrategy + +export function batchStrategy(opts?: { + maxSize?: number + wait?: number + getShouldExecute?: (items: any[]) => boolean +}): BatchStrategy +``` + +## Technical Implementation Details + +### Core createSerializedTransaction + +The core function will: + +1. Accept a strategy and mutationFn +2. Create a wrapper around `createTransaction` from existing code +3. Use the strategy's `execute()` method to control when transactions are committed +4. 
Return `{ mutate, cleanup }` where: + - `mutate(callback): Transaction` - executes mutations according to strategy and returns the Transaction object + - `cleanup()` - cleans up strategy resources + +**Important:** The `mutate()` function returns a `Transaction` object so callers can: + +- Await `transaction.isPersisted.promise` to know when persistence completes +- Handle errors via try/catch or `.catch()` +- Access transaction state and metadata + +### Strategy Factories + +Each strategy factory returns an object with: + +- `execute(fn)` - wraps the function with Pacer's utility +- `cleanup()` - cleans up the Pacer instance + +Example for debounceStrategy: + +```typescript +// NOTE: Import path needs validation - Pacer may export from main entry point +// Likely: import { Debouncer } from '@tanstack/pacer' or similar +import { Debouncer } from "@tanstack/pacer" // TODO: Validate actual export path + +export function debounceStrategy(opts: { + wait: number + leading?: boolean + trailing?: boolean +}) { + const debouncer = new Debouncer(opts) + + return { + _type: "debounce" as const, + execute: (fn: () => void) => { + debouncer.execute(fn) + }, + cleanup: () => { + debouncer.cancel() + }, + } +} +``` + +### React Hook Implementation + +```typescript +export function useSerializedTransaction(config) { + // Include strategy in dependencies to handle strategy changes + const { mutate, cleanup } = useMemo(() => { + return createSerializedTransaction(config) + }, [config.mutationFn, config.metadata, config.strategy]) + + // Cleanup on unmount or when dependencies change + useEffect(() => { + return () => cleanup() + }, [cleanup]) + + // Use useCallback to provide stable reference + const stableMutate = useCallback(mutate, [mutate]) + + return stableMutate +} +``` + +**Key fixes:** + +- Include `config.strategy` in `useMemo` dependencies to handle strategy changes +- Properly cleanup when strategy changes (via useEffect cleanup) +- Return stable callback reference via `useCallback` + +## Benefits + +- ✅ Leverages battle-tested TanStack Pacer utilities +- ✅ Reduces backend write contention +- ✅ Framework-agnostic core promotes consistency +- ✅ Type-safe, composable API +- ✅ Aligns with TanStack ecosystem patterns +- ✅ Supports all 5 framework integrations +- ✅ Simple, declarative API for users +- ✅ Easy to add custom strategies + +## Open Questions + +1. Should we support custom strategies? (i.e., users passing their own strategy objects) +2. Do we need lifecycle callbacks like `onSuccess`, `onError` for each mutate call? +3. Should batching strategy automatically merge mutations or keep them separate? +4. Rate limiting strategy - useful or skip for now? + +## Notes + +- ❌ Dropped merge strategy for now (more complex to design, less clear use case) +- The pattern follows existing TanStack patterns where core is framework-agnostic +- Similar to how `useLiveQuery` wraps core query logic per framework diff --git a/docs/collections/powersync-collection.md b/docs/collections/powersync-collection.md new file mode 100644 index 000000000..afc665836 --- /dev/null +++ b/docs/collections/powersync-collection.md @@ -0,0 +1,475 @@ +--- +title: PowerSync Collection +--- + +# PowerSync Collection + +PowerSync collections provide seamless integration between TanStack DB and [PowerSync](https://powersync.com), enabling automatic synchronization between your in-memory TanStack DB collections and PowerSync's SQLite database. 
This gives you offline-ready persistence, real-time sync capabilities, and powerful conflict resolution. + +## Overview + +The `@tanstack/powersync-db-collection` package allows you to create collections that: + +- Automatically mirror the state of an underlying PowerSync SQLite database +- Reactively update when PowerSync records change +- Support optimistic mutations with rollback on error +- Provide persistence handlers to keep PowerSync in sync with TanStack DB transactions +- Use PowerSync's efficient SQLite-based storage engine +- Work with PowerSync's real-time sync features for offline-first scenarios +- Leverage PowerSync's built-in conflict resolution and data consistency guarantees +- Enable real-time synchronization with PostgreSQL, MongoDB and MySQL backends + +## 1. Installation + +Install the PowerSync collection package along with your preferred framework integration. +PowerSync currently works with Web, React Native and Node.js. The examples below use the Web SDK. +See the PowerSync quickstart [docs](https://docs.powersync.com/installation/quickstart-guide) for more details. + +```bash +npm install @tanstack/powersync-db-collection @powersync/web @journeyapps/wa-sqlite +``` + +### 2. Create a PowerSync Database and Schema + +```ts +import { Schema, Table, column } from "@powersync/web" + +// Define your schema +const APP_SCHEMA = new Schema({ + documents: new Table({ + name: column.text, + author: column.text, + created_at: column.text, + archived: column.integer, + }), +}) + +// Initialize PowerSync database +const db = new PowerSyncDatabase({ + database: { + dbFilename: "app.sqlite", + }, + schema: APP_SCHEMA, +}) +``` + +### 3. (optional) Configure Sync with a Backend + +```ts +import { + AbstractPowerSyncDatabase, + PowerSyncBackendConnector, + PowerSyncCredentials, +} from "@powersync/web" + +// TODO implement your logic here +class Connector implements PowerSyncBackendConnector { + fetchCredentials: () => Promise + + /** Upload local changes to the app backend. + * + * Use {@link AbstractPowerSyncDatabase.getCrudBatch} to get a batch of changes to upload. + * + * Any thrown errors will result in a retry after the configured wait period (default: 5 seconds). + */ + uploadData: (database: AbstractPowerSyncDatabase) => Promise +} + +// Configure the client to connect to a PowerSync service and your backend +db.connect(new Connector()) +``` + +### 4. Create a TanStack DB Collection + +There are two main ways to create a collection: using type inference or using schema validation. Type inference will infer collection types from the underlying PowerSync SQLite tables. Schema validation can be used for additional input/output validations and type transforms. + +#### Option 1: Using Table Type Inference + +The collection types are automatically inferred from the PowerSync schema table definition. The table is used to construct a default standard schema validator which is used internally to validate collection operations. + +Collection mutations accept SQLite types and queries report data with SQLite types. 
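+For example, here is a minimal usage sketch (it assumes the `documentsCollection` created in the snippet just below): mutations pass SQLite-compatible values and reads return the same SQLite representations.
+
+```ts
+// Sketch only — uses the `documentsCollection` defined in the next snippet (Option 1 inference).
+documentsCollection.insert({
+  id: crypto.randomUUID(),
+  name: "Quarterly report",
+  author: "alice",
+  created_at: new Date().toISOString(), // TEXT column: ISO string, not a Date
+  archived: 0, // INTEGER column: 0/1, not a boolean
+})
+
+// Reads report the same SQLite shapes
+const doc = documentsCollection.get("some-id") // "some-id" is a placeholder key
+// doc?.created_at: string | null, doc?.archived: number | null
+```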
+ +```ts +import { createCollection } from "@tanstack/react-db" +import { powerSyncCollectionOptions } from "@tanstack/powersync-db-collection" + +const documentsCollection = createCollection( + powerSyncCollectionOptions({ + database: db, + table: APP_SCHEMA.props.documents, + }) +) + +/** Note: The types for input and output are defined as this */ +// Used for mutations like `insert` or `update` +type DocumentCollectionInput = { + id: string + name: string | null + author: string | null + created_at: string | null // SQLite TEXT + archived: number | null // SQLite integer +} +// The type of query/data results +type DocumentCollectionOutput = DocumentCollectionInput +``` + +The standard PowerSync SQLite types map to these TypeScript types: + +| PowerSync Column Type | TypeScript Type | Description | +| --------------------- | ---------------- | -------------------------------------------------------------------- | +| `column.text` | `string \| null` | Text values, commonly used for strings, JSON, dates (as ISO strings) | +| `column.integer` | `number \| null` | Integer values, also used for booleans (0/1) | +| `column.real` | `number \| null` | Floating point numbers | + +Note: All PowerSync column types are nullable by default. + +#### Option 2: SQLite Types with Schema Validation + +Additional validations for collection mutations can be performed with a custom schema. The Schema below asserts that +the `name`, `author` and `created_at` fields are required as input. `name` also has an additional string length check. + +Note: The input and output types specified in this example still satisfy the underlying SQLite types. An additional `deserializationSchema` is required if the typing differs. See the examples below for more details. + +The application logic (including the backend) should enforce that all incoming synced data passes validation with the `schema`. Failing to validate data will result in inconsistency of the collection data. This is a fatal error! An `onDeserializationError` handler must be provided to react to this case. + +```ts +import { createCollection } from "@tanstack/react-db" +import { powerSyncCollectionOptions } from "@tanstack/powersync-db-collection" +import { z } from "zod" + +// Schema validates SQLite types but adds constraints +const schema = z.object({ + id: z.string(), + name: z.string().min(3, { message: "Should be at least 3 characters" }), + author: z.string(), + created_at: z.string(), // SQLite TEXT for dates + archived: z.number(), +}) + +const documentsCollection = createCollection( + powerSyncCollectionOptions({ + database: db, + table: APP_SCHEMA.props.documents, + schema, + onDeserializationError: (error) => { + // Present fatal error + }, + }) +) + +/** Note: The types for input and output are defined as this */ +// Used for mutations like `insert` or `update` +type DocumentCollectionInput = { + id: string + name: string + author: string + created_at: string // SQLite TEXT + archived: number // SQLite integer +} +// The type of query/data results +type DocumentCollectionOutput = DocumentCollectionInput +``` + +#### Option 3: Transform SQLite Input Types to Rich Output Types + +You can transform SQLite types to richer types (like Date objects) while keeping SQLite-compatible input types: + +Note: The Transformed types are provided by TanStackDB to the PowerSync SQLite persister. These types need to be serialized in +order to be persisted to SQLite. Most types are converted by default. 
For custom types, override the serialization by providing a +`serializer` param. + +The example below uses `nullable` columns, this is not a requirement. + +The application logic (including the backend) should enforce that all incoming synced data passes validation with the `schema`. Failing to validate data will result in inconsistency of the collection data. This is a fatal error! An `onDeserializationError` handler must be provided to react to this case. + +```ts +const schema = z.object({ + id: z.string(), + name: z.string().nullable(), + created_at: z + .string() + .nullable() + .transform((val) => (val ? new Date(val) : null)), // Transform SQLite TEXT to Date + archived: z + .number() + .nullable() + .transform((val) => (val != null ? val > 0 : null)), // Transform SQLite INTEGER to boolean +}) + +const documentsCollection = createCollection( + powerSyncCollectionOptions({ + database: db, + table: APP_SCHEMA.props.documents, + schema, + onDeserializationError: (error) => { + // Present fatal error + }, + // Optional: custom column serialization + serializer: { + // Dates are serialized by default, this is just an example + created_at: (value) => (value ? value.toISOString() : null), + }, + }) +) + +/** Note: The types for input and output are defined as this */ +// Used for mutations like `insert` or `update` +type DocumentCollectionInput = { + id: string + name: string | null + author: string | null + created_at: string | null // SQLite TEXT + archived: number | null +} +// The type of query/data results +type DocumentCollectionOutput = { + id: string + name: string | null + author: string | null + created_at: Date | null // JS Date instance + archived: boolean | null // JS boolean +} +``` + +#### Option 4: Custom Input/Output Types with Deserialization + +The input and output types can be completely decoupled from the internal SQLite types. This can be used to accept rich values for input mutations. +We require an additional `deserializationSchema` in order to validate and transform incoming synced (SQLite) updates. This schema should convert the incoming SQLite update to the output type. + +The application logic (including the backend) should enforce that all incoming synced data passes validation with the `deserializationSchema`. Failing to validate data will result in inconsistency of the collection data. This is a fatal error! An `onDeserializationError` handler must be provided to react to this case. 
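+What "reacting" looks like is application-specific; below is a minimal sketch of such a handler (the `reportFatalError` helper and the import path are assumptions, not part of this package).
+
+```ts
+import type { StandardSchemaV1 } from "@standard-schema/spec" // import path assumed
+
+// Sketch only: once a synced row fails validation the collection data can no
+// longer be trusted, so log the issues and escalate to a fatal app state.
+const onDeserializationError = (error: StandardSchemaV1.FailureResult) => {
+  console.error("Synced row failed schema validation", error.issues)
+  reportFatalError(new Error("PowerSync collection is in an inconsistent state")) // hypothetical app-level helper
+}
+```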
+ +```ts +// Our input/output types use Date and boolean +const schema = z.object({ + id: z.string(), + name: z.string(), + author: z.string(), + created_at: z.date(), // Accept Date objects as input + archived: z.boolean(), // Accept Booleans as input +}) + +// Schema to transform from SQLite types to our output types +const deserializationSchema = z.object({ + id: z.string(), + name: z.string(), + author: z.string(), + created_at: z + .string() + .transform((val) => (new Date(val))), // SQLite TEXT to Date + archived: z + .number() + .transform((val) => (val > 0), // SQLite INTEGER to Boolean +}) + +const documentsCollection = createCollection( + powerSyncCollectionOptions({ + database: db, + table: APP_SCHEMA.props.documents, + schema, + deserializationSchema, + onDeserializationError: (error) => { + // Present fatal error + }, + }) +) + +/** Note: The types for input and output are defined as this */ +// Used for mutations like `insert` or `update` +type DocumentCollectionInput = { + id: string + name: string + author: string + created_at: Date + archived: boolean +} +// The type of query/data results +type DocumentCollectionOutput = DocumentCollectionInput +``` + +## Features + +### Offline-First + +PowerSync collections are offline-first by default. All data is stored locally in a SQLite database, allowing your app to work without an internet connection. Changes are automatically synced when connectivity is restored. + +### Real-Time Sync + +When connected to a PowerSync backend, changes are automatically synchronized in real-time across all connected clients. The sync process handles: + +- Bi-directional sync with the server +- Conflict resolution +- Queue management for offline changes +- Automatic retries on connection loss + +### Working with Rich JavaScript Types + +PowerSync collections support rich JavaScript types like `Date`, `Boolean`, and custom objects while maintaining SQLite compatibility. The collection handles serialization and deserialization automatically: + +```typescript +import { z } from "zod" +import { Schema, Table, column } from "@powersync/web" +import { createCollection } from "@tanstack/react-db" +import { powerSyncCollectionOptions } from "@tanstack/powersync-db-collection" + +// Define PowerSync SQLite schema +const APP_SCHEMA = new Schema({ + tasks: new Table({ + title: column.text, + due_date: column.text, // Stored as ISO string in SQLite + completed: column.integer, // Stored as 0/1 in SQLite + metadata: column.text, // Stored as JSON string in SQLite + }), +}) + +// Define rich types schema +const taskSchema = z.object({ + id: z.string(), + title: z.string().nullable(), + due_date: z + .string() + .nullable() + .transform((val) => (val ? new Date(val) : null)), // Convert to Date + completed: z + .number() + .nullable() + .transform((val) => (val != null ? val > 0 : null)), // Convert to boolean + metadata: z + .string() + .nullable() + .transform((val) => (val ? 
JSON.parse(val) : null)), // Parse JSON +}) + +// Create collection with rich types +const tasksCollection = createCollection( + powerSyncCollectionOptions({ + database: db, + table: APP_SCHEMA.props.tasks, + schema: taskSchema, + }) +) + +// Work with rich types in your code +await tasksCollection.insert({ + id: crypto.randomUUID(), + title: "Review PR", + due_date: "2025-10-30T10:00:00Z", // String input is automatically converted to Date + completed: 0, // Number input is automatically converted to boolean + metadata: JSON.stringify({ priority: "high" }), +}) + +// Query returns rich types +const task = tasksCollection.get("task-1") +console.log(task.due_date instanceof Date) // true +console.log(typeof task.completed) // "boolean" +console.log(task.metadata.priority) // "high" +``` + +### Type Safety with Rich Types + +The collection maintains type safety throughout: + +```typescript +type TaskInput = { + id: string + title: string | null + due_date: string | null // Accept ISO string for mutations + completed: number | null // Accept 0/1 for mutations + metadata: string | null // Accept JSON string for mutations +} + +type TaskOutput = { + id: string + title: string | null + due_date: Date | null // Get Date object in queries + completed: boolean | null // Get boolean in queries + metadata: { + priority: string + [key: string]: any + } | null +} + +// TypeScript enforces correct types: +tasksCollection.insert({ + due_date: new Date(), // Error: Type 'Date' is not assignable to type 'string' +}) + +const task = tasksCollection.get("task-1") +task.due_date.getTime() // OK - TypeScript knows this is a Date +``` + +### Optimistic Updates + +Updates to the collection are applied optimistically to the local state first, then synchronized with PowerSync and the backend. If an error occurs during sync, the changes are automatically rolled back. + +## Configuration Options + +The `powerSyncCollectionOptions` function accepts the following options: + +```ts +interface PowerSyncCollectionConfig { + // Required options + database: PowerSyncDatabase + table: Table + + // Schema validation and type transformation + schema?: StandardSchemaV1 + deserializationSchema?: StandardSchemaV1 // Required for custom input types + onDeserializationError?: (error: StandardSchemaV1.FailureResult) => void // Required for custom input types + + // Optional Custom serialization + serializer?: { + [Key in keyof TOutput]?: (value: TOutput[Key]) => SQLiteCompatibleType + } + + // Performance tuning + syncBatchSize?: number // Control batch size for initial sync, defaults to 1000 +} +``` + +## Advanced Transactions + +When you need more control over transaction handling, such as batching multiple operations or handling complex transaction scenarios, you can use PowerSync's transaction system directly with TanStack DB transactions. 
+ +```ts +import { createTransaction } from "@tanstack/react-db" +import { PowerSyncTransactor } from "@tanstack/powersync-db-collection" + +// Create a transaction that won't auto-commit +const batchTx = createTransaction({ + autoCommit: false, + mutationFn: async ({ transaction }) => { + // Use PowerSyncTransactor to apply the transaction to PowerSync + await new PowerSyncTransactor({ database: db }).applyTransaction( + transaction + ) + }, +}) + +// Perform multiple operations in the transaction +batchTx.mutate(() => { + // Add multiple documents in a single transaction + for (let i = 0; i < 5; i++) { + documentsCollection.insert({ + id: crypto.randomUUID(), + name: `Document ${i}`, + content: `Content ${i}`, + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + }) + } +}) + +// Commit the transaction +await batchTx.commit() + +// Wait for the changes to be persisted +await batchTx.isPersisted.promise +``` + +This approach allows you to: + +- Batch multiple operations into a single transaction +- Control when the transaction is committed +- Ensure all operations are atomic +- Wait for persistence confirmation +- Handle complex transaction scenarios diff --git a/docs/collections/query-collection.md b/docs/collections/query-collection.md index cd4e7eedc..91a0f7dea 100644 --- a/docs/collections/query-collection.md +++ b/docs/collections/query-collection.md @@ -9,6 +9,7 @@ Query collections provide seamless integration between TanStack DB and TanStack ## Overview The `@tanstack/query-db-collection` package allows you to create collections that: + - Automatically sync with remote data via TanStack Query - Support optimistic updates with automatic rollback on errors - Handle persistence through customizable mutation handlers @@ -23,17 +24,17 @@ npm install @tanstack/query-db-collection @tanstack/query-core @tanstack/db ## Basic Usage ```typescript -import { QueryClient } from '@tanstack/query-core' -import { createCollection } from '@tanstack/db' -import { queryCollectionOptions } from '@tanstack/query-db-collection' +import { QueryClient } from "@tanstack/query-core" +import { createCollection } from "@tanstack/db" +import { queryCollectionOptions } from "@tanstack/query-db-collection" const queryClient = new QueryClient() const todosCollection = createCollection( queryCollectionOptions({ - queryKey: ['todos'], + queryKey: ["todos"], queryFn: async () => { - const response = await fetch('/api/todos') + const response = await fetch("/api/todos") return response.json() }, queryClient, @@ -55,7 +56,7 @@ The `queryCollectionOptions` function accepts the following options: ### Query Options -- `select`: Function that lets extract array items when they’re wrapped with metadata +- `select`: Function that lets extract array items when they're wrapped with metadata - `enabled`: Whether the query should automatically run (default: `true`) - `refetchInterval`: Refetch interval in milliseconds - `retry`: Retry configuration for failed queries @@ -83,30 +84,30 @@ You can define handlers that are called when mutations occur. 
These handlers can ```typescript const todosCollection = createCollection( queryCollectionOptions({ - queryKey: ['todos'], + queryKey: ["todos"], queryFn: fetchTodos, queryClient, getKey: (item) => item.id, - + onInsert: async ({ transaction }) => { - const newItems = transaction.mutations.map(m => m.modified) + const newItems = transaction.mutations.map((m) => m.modified) await api.createTodos(newItems) // Returning nothing or { refetch: true } will trigger a refetch // Return { refetch: false } to skip automatic refetch }, - + onUpdate: async ({ transaction }) => { - const updates = transaction.mutations.map(m => ({ + const updates = transaction.mutations.map((m) => ({ id: m.key, - changes: m.changes + changes: m.changes, })) await api.updateTodos(updates) }, - + onDelete: async ({ transaction }) => { - const ids = transaction.mutations.map(m => m.key) + const ids = transaction.mutations.map((m) => m.key) await api.deleteTodos(ids) - } + }, }) ) ``` @@ -119,14 +120,15 @@ You can control this behavior by returning an object with a `refetch` property: ```typescript onInsert: async ({ transaction }) => { - await api.createTodos(transaction.mutations.map(m => m.modified)) - + await api.createTodos(transaction.mutations.map((m) => m.modified)) + // Skip the automatic refetch return { refetch: false } } ``` This is useful when: + - You're confident the server state matches what you sent - You want to avoid unnecessary network requests - You're handling state updates through other mechanisms (like WebSockets) @@ -135,7 +137,10 @@ This is useful when: The collection provides these utility methods via `collection.utils`: -- `refetch()`: Manually trigger a refetch of the query +- `refetch(opts?)`: Manually trigger a refetch of the query + - `opts.throwOnError`: Whether to throw an error if the refetch fails (default: `false`) + - Bypasses `enabled: false` to support imperative/manual refetching patterns (similar to hook `refetch()` behavior) + - Returns `QueryObserverResult` for inspecting the result ## Direct Writes @@ -144,10 +149,12 @@ Direct writes are intended for scenarios where the normal query/mutation flow do ### Understanding the Data Stores Query Collections maintain two data stores: + 1. **Synced Data Store** - The authoritative state synchronized with the server via `queryFn` 2. 
**Optimistic Mutations Store** - Temporary changes that are applied optimistically before server confirmation Normal collection operations (insert, update, delete) create optimistic mutations that are: + - Applied immediately to the UI - Sent to the server via persistence handlers - Rolled back automatically if the server request fails @@ -158,6 +165,7 @@ Direct writes bypass this system entirely and write directly to the synced data ### When to Use Direct Writes Direct writes should be used when: + - You need to sync real-time updates from WebSockets or server-sent events - You're dealing with large datasets where refetching everything is too expensive - You receive incremental updates or server-computed field updates @@ -167,19 +175,28 @@ Direct writes should be used when: ```typescript // Insert a new item directly to the synced data store -todosCollection.utils.writeInsert({ id: '1', text: 'Buy milk', completed: false }) +todosCollection.utils.writeInsert({ + id: "1", + text: "Buy milk", + completed: false, +}) // Update an existing item in the synced data store -todosCollection.utils.writeUpdate({ id: '1', completed: true }) +todosCollection.utils.writeUpdate({ id: "1", completed: true }) // Delete an item from the synced data store -todosCollection.utils.writeDelete('1') +todosCollection.utils.writeDelete("1") // Upsert (insert or update) in the synced data store -todosCollection.utils.writeUpsert({ id: '1', text: 'Buy milk', completed: false }) +todosCollection.utils.writeUpsert({ + id: "1", + text: "Buy milk", + completed: false, +}) ``` These operations: + - Write directly to the synced data store - Do NOT create optimistic mutations - Do NOT trigger automatic query refetches @@ -192,10 +209,10 @@ The `writeBatch` method allows you to perform multiple operations atomically. 
An ```typescript todosCollection.utils.writeBatch(() => { - todosCollection.utils.writeInsert({ id: '1', text: 'Buy milk' }) - todosCollection.utils.writeInsert({ id: '2', text: 'Walk dog' }) - todosCollection.utils.writeUpdate({ id: '3', completed: true }) - todosCollection.utils.writeDelete('4') + todosCollection.utils.writeInsert({ id: "1", text: "Buy milk" }) + todosCollection.utils.writeInsert({ id: "2", text: "Walk dog" }) + todosCollection.utils.writeUpdate({ id: "3", completed: true }) + todosCollection.utils.writeDelete("4") }) ``` @@ -203,17 +220,17 @@ todosCollection.utils.writeBatch(() => { ```typescript // Handle real-time updates from WebSocket without triggering full refetches -ws.on('todos:update', (changes) => { +ws.on("todos:update", (changes) => { todosCollection.utils.writeBatch(() => { - changes.forEach(change => { + changes.forEach((change) => { switch (change.type) { - case 'insert': + case "insert": todosCollection.utils.writeInsert(change.data) break - case 'update': + case "update": todosCollection.utils.writeUpdate(change.data) break - case 'delete': + case "delete": todosCollection.utils.writeDelete(change.id) break } @@ -229,13 +246,13 @@ When the server returns computed fields (like server-generated IDs or timestamps ```typescript const todosCollection = createCollection( queryCollectionOptions({ - queryKey: ['todos'], + queryKey: ["todos"], queryFn: fetchTodos, queryClient, getKey: (item) => item.id, onInsert: async ({ transaction }) => { - const newItems = transaction.mutations.map(m => m.modified) + const newItems = transaction.mutations.map((m) => m.modified) // Send to server and get back items with server-computed fields const serverItems = await api.createTodos(newItems) @@ -243,7 +260,7 @@ const todosCollection = createCollection( // Sync server-computed fields (like server-generated IDs, timestamps, etc.) 
// to the collection's synced data store todosCollection.utils.writeBatch(() => { - serverItems.forEach(serverItem => { + serverItems.forEach((serverItem) => { todosCollection.utils.writeInsert(serverItem) }) }) @@ -254,26 +271,26 @@ const todosCollection = createCollection( }, onUpdate: async ({ transaction }) => { - const updates = transaction.mutations.map(m => ({ + const updates = transaction.mutations.map((m) => ({ id: m.key, - changes: m.changes + changes: m.changes, })) const serverItems = await api.updateTodos(updates) // Sync server-computed fields from the update response todosCollection.utils.writeBatch(() => { - serverItems.forEach(serverItem => { + serverItems.forEach((serverItem) => { todosCollection.utils.writeUpdate(serverItem) }) }) return { refetch: false } - } + }, }) ) // Usage is just like a regular collection -todosCollection.insert({ text: 'Buy milk', completed: false }) +todosCollection.insert({ text: "Buy milk", completed: false }) ``` ### Example: Large Dataset Pagination @@ -282,10 +299,10 @@ todosCollection.insert({ text: 'Buy milk', completed: false }) // Load additional pages without refetching existing data const loadMoreTodos = async (page) => { const newTodos = await api.getTodos({ page, limit: 50 }) - + // Add new items without affecting existing ones todosCollection.utils.writeBatch(() => { - newTodos.forEach(todo => { + newTodos.forEach((todo) => { todosCollection.utils.writeInsert(todo) }) }) @@ -318,31 +335,33 @@ Since the query collection expects `queryFn` to return the complete state, you c ```typescript const todosCollection = createCollection( queryCollectionOptions({ - queryKey: ['todos'], + queryKey: ["todos"], queryFn: async ({ queryKey }) => { // Get existing data from cache const existingData = queryClient.getQueryData(queryKey) || [] - + // Fetch only new/updated items (e.g., changes since last sync) - const lastSyncTime = localStorage.getItem('todos-last-sync') - const newData = await fetch(`/api/todos?since=${lastSyncTime}`).then(r => r.json()) - + const lastSyncTime = localStorage.getItem("todos-last-sync") + const newData = await fetch(`/api/todos?since=${lastSyncTime}`).then( + (r) => r.json() + ) + // Merge new data with existing data - const existingMap = new Map(existingData.map(item => [item.id, item])) - + const existingMap = new Map(existingData.map((item) => [item.id, item])) + // Apply updates and additions - newData.forEach(item => { + newData.forEach((item) => { existingMap.set(item.id, item) }) - + // Handle deletions if your API provides them if (newData.deletions) { - newData.deletions.forEach(id => existingMap.delete(id)) + newData.deletions.forEach((id) => existingMap.delete(id)) } - + // Update sync time - localStorage.setItem('todos-last-sync', new Date().toISOString()) - + localStorage.setItem("todos-last-sync", new Date().toISOString()) + // Return the complete merged state return Array.from(existingMap.values()) }, @@ -353,6 +372,7 @@ const todosCollection = createCollection( ``` This pattern allows you to: + - Fetch only incremental changes from your API - Merge those changes with existing data - Return the complete state that the collection expects @@ -363,6 +383,7 @@ This pattern allows you to: Direct writes update the collection immediately and also update the TanStack Query cache. However, they do not prevent the normal query sync behavior. If your `queryFn` returns data that conflicts with your direct writes, the query data will take precedence. To handle this properly: + 1. 
Use `{ refetch: false }` in your persistence handlers when using direct writes 2. Set appropriate `staleTime` to prevent unnecessary refetches 3. Design your `queryFn` to be aware of incremental updates (e.g., only fetch new data) @@ -376,4 +397,4 @@ All direct write methods are available on `collection.utils`: - `writeDelete(keys)`: Delete one or more items directly - `writeUpsert(data)`: Insert or update one or more items directly - `writeBatch(callback)`: Perform multiple operations atomically -- `refetch()`: Manually trigger a refetch of the query +- `refetch(opts?)`: Manually trigger a refetch of the query diff --git a/docs/guides/live-queries.md b/docs/guides/live-queries.md index 5b1664b20..d738327fc 100644 --- a/docs/guides/live-queries.md +++ b/docs/guides/live-queries.md @@ -38,6 +38,7 @@ The result types are automatically inferred from your query structure, providing - [Joins](#joins) - [Subqueries](#subqueries) - [groupBy and Aggregations](#groupby-and-aggregations) +- [findOne](#findone) - [Distinct](#distinct) - [Order By, Limit, and Offset](#order-by-limit-and-offset) - [Composable Queries](#composable-queries) @@ -988,6 +989,106 @@ const engineeringStats = deptStats.get(1) > - **Single column grouping**: Keyed by the actual value (e.g., `deptStats.get(1)`) > - **Multiple column grouping**: Keyed by a JSON string of the grouped values (e.g., `userStats.get('[1,"admin"]')`) +## findOne + +Use `findOne` to return a single result instead of an array. This is useful when you expect to find at most one matching record, such as when querying by a unique identifier. + +The `findOne` method changes the return type from an array to a single object or `undefined`. When no matching record is found, the result is `undefined`. + +### Method Signature + +```ts +findOne(): Query +``` + +### Basic Usage + +Find a specific user by ID: + +```ts +const user = createLiveQueryCollection((q) => + q + .from({ users: usersCollection }) + .where(({ users }) => eq(users.id, 1)) + .findOne() +) + +// Result type: User | undefined +// If user with id=1 exists: { id: 1, name: 'John', ... } +// If not found: undefined +``` + +### With React Hooks + +Use `findOne` with `useLiveQuery` to get a single record: + +```tsx +import { useLiveQuery } from '@tanstack/react-db' +import { eq } from '@tanstack/db' + +function UserProfile({ userId }: { userId: string }) { + const { data: user, isLoading } = useLiveQuery((q) => + q + .from({ users: usersCollection }) + .where(({ users }) => eq(users.id, userId)) + .findOne() + , [userId]) + + if (isLoading) return
<div>Loading...</div>
+ if (!user) return <div>User not found</div>
+
+ return <div>{user.name}</div>
+} +``` + +### With Select + +Combine `findOne` with `select` to project specific fields: + +```ts +const userEmail = createLiveQueryCollection((q) => + q + .from({ users: usersCollection }) + .where(({ users }) => eq(users.id, 1)) + .select(({ users }) => ({ + id: users.id, + email: users.email, + })) + .findOne() +) + +// Result type: { id: number, email: string } | undefined +``` + +### Return Type Behavior + +The return type changes based on whether `findOne` is used: + +```ts +// Without findOne - returns array +const users = createLiveQueryCollection((q) => + q.from({ users: usersCollection }) +) +// Type: Array + +// With findOne - returns single object or undefined +const user = createLiveQueryCollection((q) => + q.from({ users: usersCollection }).findOne() +) +// Type: User | undefined +``` + +### Best Practices + +**Use when:** +- Querying by unique identifiers (ID, email, etc.) +- You expect at most one result +- You want type-safe single-record access without array indexing + +**Avoid when:** +- You might have multiple matching records (use regular queries instead) +- You need to iterate over results + ## Distinct Use `distinct` to remove duplicate rows from your query results based on the selected columns. The `distinct` operator ensures that each unique combination of selected values appears only once in the result set. diff --git a/docs/guides/mutations.md b/docs/guides/mutations.md index 24e8aba92..f81fea8e6 100644 --- a/docs/guides/mutations.md +++ b/docs/guides/mutations.md @@ -100,6 +100,7 @@ The benefits: - [Operation Handlers](#operation-handlers) - [Creating Custom Actions](#creating-custom-actions) - [Manual Transactions](#manual-transactions) +- [Paced Mutations](#paced-mutations) - [Mutation Merging](#mutation-merging) - [Controlling Optimistic Behavior](#controlling-optimistic-behavior) - [Transaction States](#transaction-states) @@ -892,6 +893,238 @@ tx.isPersisted.promise.then(() => { console.log(tx.state) // 'pending', 'persisting', 'completed', or 'failed' ``` +## Paced Mutations + +Paced mutations provide fine-grained control over **when and how** mutations are persisted to your backend. Instead of persisting every mutation immediately, you can use timing strategies to batch, delay, or queue mutations based on your application's needs. + +Powered by [TanStack Pacer](https://github.com/TanStack/pacer), paced mutations are ideal for scenarios like: +- **Auto-save forms** that wait for the user to stop typing +- **Slider controls** that need smooth updates without overwhelming the backend +- **Sequential workflows** where order matters and every mutation must persist + +### Key Design + +The fundamental difference between strategies is how they handle transactions: + +**Debounce/Throttle**: Only one pending transaction (collecting mutations) and one persisting transaction (writing to backend) at a time. Multiple rapid mutations automatically merge together into a single transaction. + +**Queue**: Each mutation creates a separate transaction, guaranteed to run in the order they're made (FIFO by default, configurable to LIFO). All mutations are guaranteed to persist. + +### Available Strategies + +| Strategy | Behavior | Best For | +|----------|----------|----------| +| **`debounceStrategy`** | Wait for inactivity before persisting. Only final state is saved. | Auto-save forms, search-as-you-type | +| **`throttleStrategy`** | Ensure minimum spacing between executions. Mutations between executions are merged. 
| Sliders, progress updates, analytics | +| **`queueStrategy`** | Each mutation becomes a separate transaction, processed sequentially in order (FIFO by default, configurable to LIFO). All mutations guaranteed to persist. | Sequential workflows, file uploads, rate-limited APIs | + +### Debounce Strategy + +The debounce strategy waits for a period of inactivity before persisting. This is perfect for auto-save scenarios where you want to wait until the user stops typing before saving their work. + +```tsx +import { usePacedMutations, debounceStrategy } from "@tanstack/react-db" + +function AutoSaveForm() { + const mutate = usePacedMutations({ + mutationFn: async ({ transaction }) => { + // Persist the final merged state to the backend + await api.forms.save(transaction.mutations) + }, + // Wait 500ms after the last change before persisting + strategy: debounceStrategy({ wait: 500 }), + }) + + const handleChange = (field: string, value: string) => { + // Multiple rapid changes merge into a single transaction + mutate(() => { + formCollection.update(formId, (draft) => { + draft[field] = value + }) + }) + } + + return ( +
<form> + <input onChange={(e) => handleChange('title', e.target.value)} /> +