queue gives your services one queue API with Redis, SQL, NATS, SQS, RabbitMQ, and in-process drivers.
queue is a backend-agnostic job queue runtime. Your application code depends on queue.Queue and fluent queue.Task values. The driver decides whether work runs via Redis/Asynq, a SQL table, NATS, SQS, RabbitMQ, an in-process worker pool, or synchronously in the caller.
Current matrix trust status and known integration gaps are tracked in docs/integration-scenarios.md.
go get github.com/goforj/queue

import (
"context"
"time"
"github.com/goforj/queue"
)
type EmailPayload struct {
ID int `json:"id"`
To string `json:"to"`
}
func emailHandler(ctx context.Context, task queue.Task) error {
_ = ctx
var payload EmailPayload
if err := task.Bind(&payload); err != nil {
return err
}
_ = payload
return nil
}
func main() {
// Create queue runtime.
q, _ := queue.NewWorkerpool()
// Register handler on queue runtime.
q.Register("emails:send", emailHandler)
// Start workers (2 concurrent workers).
_ = q.Workers(2).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
// Build a task with payload and dispatch behavior.
task := queue.NewTask("emails:send").
Payload(EmailPayload{
ID: 123,
To: "user@example.com",
}).
OnQueue("critical").
Delay(5 * time.Second).
Timeout(20 * time.Second).
Retry(3)
// Dispatch the task.
_ = q.Dispatch(task)
}

Switch to Redis without changing job code:

q, _ := queue.NewRedis("127.0.0.1:6379")

Use SQL for a durable local queue runtime:

q, _ := queue.NewDatabase("sqlite", "file:queue.db?_busy_timeout=5000")

- null: drop-only dispatch path when you want queue calls to no-op in tests/dev.
- sync: deterministic unit tests with inline execution and no external broker.
- workerpool: async local behavior tests without external infrastructure.
- integration tag + backend matrix: full broker/database realism (Redis, SQL, NATS, SQS, RabbitMQ).
Use null when you only need to exercise dispatch call paths without execution.
Use sync when you need handler logic to run in the same test/process deterministically.
fake := queue.NewFake()
fake.AssertNothingDispatched(t)
// exercise code that dispatches jobs against fake
_ = fake.Dispatch(
queue.NewTask("orders:ship").
Payload(map[string]any{"id": 42}).
OnQueue("orders"),
)
fake.AssertDispatched(t, "orders:ship")
fake.AssertDispatchedOn(t, "orders", "orders:ship")
fake.AssertDispatchedTimes(t, "orders:ship", 1)
fake.AssertNotDispatched(t, "orders:cancel")
fake.AssertCount(t, 1)

// Null: dispatch is accepted and dropped.
q, _ := queue.NewNull()
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(map[string]any{"id": 1}).
OnQueue("default"),
)

// Sync: register handler and execute inline on dispatch.
q, _ := queue.NewSync()
q.Register("emails:send", emailHandler)
_ = q.Workers(1).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1, To: "user@example.com"}).
OnQueue("default"),
)

// Workerpool: async local workers.
q, _ := queue.NewWorkerpool()
q.Register("emails:send", emailHandler)
_ = q.Workers(2).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1, To: "user@example.com"}).
OnQueue("default"),
)

// Database: durable SQL-backed queue.
q, _ := queue.NewDatabase("sqlite", "file:queue.db?_busy_timeout=5000")
q.Register("emails:send", emailHandler)
_ = q.Workers(2).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1, To: "user@example.com"}).
OnQueue("default"),
)

// Redis: broker-backed async queue.
q, _ := queue.NewRedis("127.0.0.1:6379")
q.Register("emails:send", emailHandler)
_ = q.Workers(2).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1, To: "user@example.com"}).
OnQueue("default"),
)

// NATS: broker-backed async queue.
q, _ := queue.NewNATS("nats://127.0.0.1:4222")
q.Register("emails:send", emailHandler)
_ = q.Workers(2).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1, To: "user@example.com"}).
OnQueue("default"),
)

// SQS: broker-backed async queue.
q, _ := queue.NewSQS("us-east-1")
q.Register("emails:send", emailHandler)
_ = q.Workers(2).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1, To: "user@example.com"}).
OnQueue("default"),
)

// RabbitMQ: broker-backed async queue.
q, _ := queue.NewRabbitMQ("amqp://guest:guest@127.0.0.1:5672/")
q.Register("emails:send", emailHandler)
_ = q.Workers(2).StartWorkers(context.Background())
defer q.Shutdown(context.Background())
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1, To: "user@example.com"}).
OnQueue("default"),
)

task := queue.NewTask("emails:send").
// Payload can be bytes, structs, maps, or JSON-marshalable values.
// Default payload is empty.
Payload(map[string]any{"id": 123, "to": "user@example.com"}).
// OnQueue sets the queue name.
// Default is empty; broker-style drivers expect an explicit queue.
OnQueue("default").
// Timeout sets per-task execution timeout.
// Default is unset; some drivers may apply driver/runtime defaults.
Timeout(20 * time.Second).
// Retry sets max retries.
// Default is 0, which means one total attempt.
Retry(3).
// Backoff sets retry delay.
// Default is unset; Redis dispatch returns ErrBackoffUnsupported.
Backoff(500 * time.Millisecond).
// Delay schedules first execution in the future.
// Default is 0 (run immediately).
Delay(2 * time.Second).
// UniqueFor deduplicates Type+Payload for a TTL window.
// Default is 0 (no dedupe).
UniqueFor(45 * time.Second)
_ = q.Dispatch(task)

go test ./docs/bench -tags benchrender

| Driver | N | ns/op | B/op | allocs/op |
|---|---|---|---|---|
| mysql | 579 | 1959891 | 9295 | 151 |
| nats | 1631804 | 727.8 | 1293 | 14 |
| postgres | 1227 | 1107497 | 16162 | 239 |
| rabbitmq | 8144 | 143554 | 4575 | 112 |
| redis | 13513 | 87141 | 2112 | 33 |
| sqs | 1143 | 1019487 | 65594 | 758 |
| Driver | N | ns/op | B/op | allocs/op |
|---|---|---|---|---|
| database-sqlite | 6452 | 395034 | 3345 | 87 |
| null | 26300509 | 47.63 | 128 | 1 |
| sync | 3980317 | 299.3 | 408 | 6 |
| workerpool | 1829486 | 635.2 | 456 | 7 |
Attach an observer when creating a queue. Use StatsCollector for in-memory counters and throughput windows.
collector := queue.NewStatsCollector()
q, _ := queue.New(queue.Config{
Driver: queue.DriverRedis,
RedisAddr: "127.0.0.1:6379",
Observer: collector,
})
snapshot, _ := queue.SnapshotQueue(context.Background(), q, collector)
counters, _ := snapshot.Queue("default")
throughput, _ := snapshot.Throughput("default")
fmt.Printf("%+v\n", counters)
fmt.Printf("hour=%+v\n", throughput.Hour)SnapshotQueue prefers native driver stats when available and falls back to the collector snapshot when a driver does not expose native stats.
Use queue.SupportsNativeStats(q) and queue.SupportsPause(q) to branch runtime behavior safely.
Use queue.MultiObserver(...) when you want multiple observer behaviors at once, such as logging plus stats collection.
collector := queue.NewStatsCollector()
loggerObserver := queue.ObserverFunc(func(event queue.Event) {
// send to your logger here
_ = event
})
q, _ := queue.New(queue.Config{
Driver: queue.DriverRedis,
RedisAddr: "127.0.0.1:6379",
Observer: queue.MultiObserver(loggerObserver, collector),
})
// SnapshotQueue can still use the same collector instance.
snapshot, _ := queue.SnapshotQueue(context.Background(), q, collector)
_, _ = snapshot.Queue("default")Use Observer as your middleware hook for structured logging.
Observers receive events for the entire runtime (all queues and task types).
To observe only specific tasks, filter by event.TaskType (and/or event.Queue) inside your observer.
This example logs every event kind with human-readable messages.
logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
observer := queue.ObserverFunc(func(event queue.Event) {
attemptInfo := fmt.Sprintf("attempt=%d/%d", event.Attempt, event.MaxRetry+1)
taskInfo := fmt.Sprintf("task=%s key=%s queue=%s driver=%s", event.TaskType, event.TaskKey, event.Queue, event.Driver)
switch event.Kind {
case queue.EventEnqueueAccepted:
logger.Info("Accepted dispatch", "msg", fmt.Sprintf("Accepted %s", taskInfo), "scheduled", event.Scheduled, "at", event.Time.Format(time.RFC3339Nano))
case queue.EventEnqueueRejected:
logger.Error("Dispatch failed", "msg", fmt.Sprintf("Rejected %s", taskInfo), "error", event.Err)
case queue.EventEnqueueDuplicate:
logger.Warn("Skipped duplicate job", "msg", fmt.Sprintf("Duplicate %s", taskInfo))
case queue.EventEnqueueCanceled:
logger.Warn("Canceled dispatch", "msg", fmt.Sprintf("Canceled %s", taskInfo), "error", event.Err)
case queue.EventProcessStarted:
logger.Info("Started processing job", "msg", fmt.Sprintf("Started %s (%s)", taskInfo, attemptInfo), "at", event.Time.Format(time.RFC3339Nano))
case queue.EventProcessSucceeded:
logger.Info("Processed job", "msg", fmt.Sprintf("Processed %s in %s (%s)", taskInfo, event.Duration, attemptInfo))
case queue.EventProcessFailed:
logger.Error("Processing failed", "msg", fmt.Sprintf("Failed %s after %s (%s)", taskInfo, event.Duration, attemptInfo), "error", event.Err)
case queue.EventProcessRetried:
logger.Warn("Retrying job", "msg", fmt.Sprintf("Retry scheduled for %s (%s)", taskInfo, attemptInfo), "error", event.Err)
case queue.EventProcessArchived:
logger.Error("Archived failed job", "msg", fmt.Sprintf("Archived %s after final failure (%s)", taskInfo, attemptInfo), "error", event.Err)
case queue.EventQueuePaused:
logger.Info("Paused queue", "msg", fmt.Sprintf("Paused queue=%s driver=%s", event.Queue, event.Driver))
case queue.EventQueueResumed:
logger.Info("Resumed queue", "msg", fmt.Sprintf("Resumed queue=%s driver=%s", event.Queue, event.Driver))
default:
logger.Info("Queue event", "msg", fmt.Sprintf("kind=%s %s", event.Kind, taskInfo))
}
})
q, _ := queue.New(queue.Config{
Driver: queue.DriverRedis,
RedisAddr: "127.0.0.1:6379",
Observer: observer,
})

Example: only log one task type.
observer := queue.ObserverFunc(func(event queue.Event) {
if event.TaskType != "emails:send" {
return
}
// log event...
})

Example: hook only specific event kinds.
observer := queue.ObserverFunc(func(event queue.Event) {
switch event.Kind {
case queue.EventProcessFailed, queue.EventProcessRetried, queue.EventProcessArchived:
// alerting / error logs
case queue.EventProcessSucceeded:
// success metrics
default:
return
}
})

If you prefer zerolog, implement the same Observer interface in a small adapter and set it on Config.Observer.
Legend: ✓ supported, - unsupported/fallback.
| Driver | Native Stats | PauseQueue/ResumeQueue | Collector counters | Throughput windows |
|---|---|---|---|---|
| Sync | ✓ | ✓ | ✓ | ✓ |
| Workerpool | ✓ | ✓ | ✓ | ✓ |
| Database (sqlite/mysql/postgres) | ✓ | - | ✓ | ✓ |
| Redis | ✓ | ✓ | ✓ | ✓ |
| NATS | - | - | ✓ | ✓ |
| SQS | - | - | ✓ | ✓ |
| RabbitMQ | - | - | ✓ | ✓ |
| Event | Meaning |
|---|---|
| EventEnqueueAccepted | Task was accepted by dispatch. |
| EventEnqueueRejected | Task dispatch failed with an error. |
| EventEnqueueDuplicate | Task dispatch was rejected as duplicate (UniqueFor). |
| EventEnqueueCanceled | Dispatch was canceled by context timeout/cancellation. |
| EventProcessStarted | Worker started handling a task. |
| EventProcessSucceeded | Worker completed a task successfully. |
| EventProcessFailed | Worker attempt failed. |
| EventProcessRetried | Failed attempt was requeued for another attempt. |
| EventProcessArchived | Failed task reached terminal failure (no retries left). |
| EventQueuePaused | Queue consumption was paused. |
| EventQueueResumed | Queue consumption was resumed. |
For full field-level semantics and guarantees, see docs/events.md.
// Register handlers before starting workers.
q.Register("emails:send", emailHandler)
// Start workers with explicit concurrency.
ctx := context.Background()
_ = q.Workers(2).StartWorkers(ctx)
// Dispatch jobs using either Dispatch(...) or DispatchCtx(...).
_ = q.Dispatch(
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 123, To: "user@example.com"}).
OnQueue("default"),
)
// Shutdown gracefully drains in-flight work (where supported).
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = q.Shutdown(shutdownCtx)

Use queue.Config with New for advanced/custom setups where you need multiple fields together.
Use q.Workers(n).StartWorkers(ctx) to configure worker count before start.
Common:
| Field | Notes |
|---|---|
| Driver | Selects backend. |
| DefaultQueue | Default queue name used by helpers (task-level OnQueue(...) still controls dispatch target). |
Null:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverNull |
Sync:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverSync |
Workerpool:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverWorkerpool |
Database:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverDatabase |
| Database | o | Existing *sql.DB handle; if set, driver/DSN can be omitted. |
| DatabaseDriver | ✓* | sqlite, mysql, or pgx (required when Database is nil). |
| DatabaseDSN | ✓* | Connection string (required when Database is nil). |
| DefaultQueue | o | Queue default for DB-backed runtime. |
Redis:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverRedis |
| RedisAddr | ✓ | Required for Redis queue dispatching. |
| RedisPassword | o | Redis auth password. |
| RedisDB | o | Redis logical DB index. |
NATS:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverNATS |
| NATSURL | ✓ | Required for NATS queue dispatching. |
SQS:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverSQS |
| SQSRegion | o | AWS region (defaults to us-east-1). |
| SQSEndpoint | o | Override endpoint (localstack/testing). |
| SQSAccessKey | o | Static access key. |
| SQSSecretKey | o | Static secret key. |
RabbitMQ:
| Field | Required | Notes |
|---|---|---|
| Driver | ✓ | DriverRabbitMQ |
| RabbitMQURL | ✓ | Required for RabbitMQ queue dispatching. |
The API section below is autogenerated; do not edit between the markers.
New creates a queue based on Config.Driver.
q, err := queue.NewSync()
if err != nil {
return
}
type EmailPayload struct {
ID int `json:"id"`
}
q.Register("emails:send", func(ctx context.Context, task queue.Task) error {
var payload EmailPayload
if err := task.Bind(&payload); err != nil {
return err
}
return nil
})
defer q.Shutdown(context.Background())
_ = q.DispatchCtx(
context.Background(),
queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1}).
OnQueue("default"),
)

NewDatabase creates a SQL-backed queue runtime.
q, err := queue.NewDatabase("sqlite", "file:queue.db?_busy_timeout=5000")
if err != nil {
return
}

NewNATS creates a NATS-backed queue runtime.
q, err := queue.NewNATS("nats://127.0.0.1:4222")
if err != nil {
return
}

NewNull creates a drop-only queue runtime.
q, err := queue.NewNull()
if err != nil {
return
}

NewQueueWithDefaults creates a queue runtime and sets the default queue name.
q, err := queue.NewQueueWithDefaults("critical", queue.Config{
Driver: queue.DriverSync,
})
if err != nil {
return
}
type EmailPayload struct {
ID int `json:"id"`
}
q.Register("emails:send", func(ctx context.Context, task queue.Task) error {
var payload EmailPayload
if err := task.Bind(&payload); err != nil {
return err
}
return nil
})
defer q.Shutdown(context.Background())

NewRabbitMQ creates a RabbitMQ-backed queue runtime.
q, err := queue.NewRabbitMQ("amqp://guest:guest@127.0.0.1:5672/")
if err != nil {
return
}

NewRedis creates a Redis-backed queue runtime.
q, err := queue.NewRedis("127.0.0.1:6379")
if err != nil {
return
}

NewSQS creates an SQS-backed queue runtime.
q, err := queue.NewSQS("us-east-1")
if err != nil {
return
}

NewStatsCollector creates an event collector for queue counters.
collector := queue.NewStatsCollector()

NewSync creates a synchronous in-process queue runtime.
q, err := queue.NewSync()
if err != nil {
return
}

NewWorkerpool creates an in-process workerpool queue runtime.
q, err := queue.NewWorkerpool()
if err != nil {
return
}

Active returns active count for a queue.
snapshot := queue.StatsSnapshot{
ByQueue: map[string]queue.QueueCounters{
"default": {Active: 2},
},
}
fmt.Println(snapshot.Active("default"))
// Output: 2

Archived returns archived count for a queue.
snapshot := queue.StatsSnapshot{
ByQueue: map[string]queue.QueueCounters{
"default": {Archived: 7},
},
}
fmt.Println(snapshot.Archived("default"))
// Output: 7

Failed returns failed count for a queue.
snapshot := queue.StatsSnapshot{
ByQueue: map[string]queue.QueueCounters{
"default": {Failed: 2},
},
}
fmt.Println(snapshot.Failed("default"))
// Output: 2

MultiObserver fans out events to multiple observers.
events := make(chan queue.Event, 2)
observer := queue.MultiObserver(
queue.ChannelObserver{Events: events},
queue.ObserverFunc(func(queue.Event) {}),
)
observer.Observe(queue.Event{Kind: queue.EventEnqueueAccepted})
fmt.Println(len(events))
// Output: 1

Observe calls the wrapped function.
Example: observer func logging hook
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
observer := queue.ObserverFunc(func(event queue.Event) {
logger.Info("queue event",
"kind", event.Kind,
"driver", event.Driver,
"queue", event.Queue,
"task_type", event.TaskType,
"attempt", event.Attempt,
"max_retry", event.MaxRetry,
"duration", event.Duration,
"err", event.Err,
)
})
observer.Observe(queue.Event{
Kind: queue.EventProcessSucceeded,
Driver: queue.DriverSync,
Queue: "default",
TaskType: "emails:send",
})

Example: observe event
collector := queue.NewStatsCollector()
collector.Observe(queue.Event{
Kind: queue.EventEnqueueAccepted,
Driver: queue.DriverSync,
Queue: "default",
Time: time.Now(),
})

PauseQueue pauses queue consumption for drivers that support it.
q, _ := queue.NewSync()
// NOTE(review): this example appears to be missing the PauseQueue call needed for the output below — confirm against the generated example source.
snapshot, _ := queue.SnapshotQueue(context.Background(), q, nil)
fmt.Println(snapshot.Paused("default"))
// Output: 1

Paused returns paused count for a queue.
collector := queue.NewStatsCollector()
collector.Observe(queue.Event{
Kind: queue.EventQueuePaused,
Driver: queue.DriverSync,
Queue: "default",
Time: time.Now(),
})
snapshot := collector.Snapshot()
fmt.Println(snapshot.Paused("default"))
// Output: 1

Pending returns pending count for a queue.
snapshot := queue.StatsSnapshot{
ByQueue: map[string]queue.QueueCounters{
"default": {Pending: 3},
},
}
fmt.Println(snapshot.Pending("default"))
// Output: 3

Processed returns processed count for a queue.
snapshot := queue.StatsSnapshot{
ByQueue: map[string]queue.QueueCounters{
"default": {Processed: 11},
},
}
fmt.Println(snapshot.Processed("default"))
// Output: 11

Queue returns queue counters for a queue name.
collector := queue.NewStatsCollector()
collector.Observe(queue.Event{
Kind: queue.EventEnqueueAccepted,
Driver: queue.DriverSync,
Queue: "default",
Time: time.Now(),
})
snapshot := collector.Snapshot()
counters, ok := snapshot.Queue("default")
fmt.Println(ok, counters.Pending)
// Output: true 1

Queues returns sorted queue names present in the snapshot.
collector := queue.NewStatsCollector()
collector.Observe(queue.Event{
Kind: queue.EventEnqueueAccepted,
Driver: queue.DriverSync,
Queue: "critical",
Time: time.Now(),
})
snapshot := collector.Snapshot()
names := snapshot.Queues()
fmt.Println(len(names), names[0])
// Output: 1 critical

ResumeQueue resumes queue consumption for drivers that support it.
q, _ := queue.NewSync()
snapshot, _ := queue.SnapshotQueue(context.Background(), q, nil)
fmt.Println(snapshot.Paused("default"))
// Output: 0

Scheduled returns scheduled count for a queue.
snapshot := queue.StatsSnapshot{
ByQueue: map[string]queue.QueueCounters{
"default": {Scheduled: 4},
},
}
fmt.Println(snapshot.Scheduled("default"))
// Output: 4

Snapshot returns a copy of collected counters.
collector := queue.NewStatsCollector()
collector.Observe(queue.Event{
Kind: queue.EventEnqueueAccepted,
Driver: queue.DriverSync,
Queue: "default",
Time: time.Now(),
})
collector.Observe(queue.Event{
Kind: queue.EventProcessStarted,
Driver: queue.DriverSync,
Queue: "default",
TaskKey: "task-1",
Time: time.Now(),
})
collector.Observe(queue.Event{
Kind: queue.EventProcessSucceeded,
Driver: queue.DriverSync,
Queue: "default",
TaskKey: "task-1",
Duration: 12 * time.Millisecond,
Time: time.Now(),
})
snapshot := collector.Snapshot()
counters, _ := snapshot.Queue("default")
throughput, _ := snapshot.Throughput("default")
fmt.Printf("queues=%v\n", snapshot.Queues())
fmt.Printf("counters=%+v\n", counters)
fmt.Printf("hour=%+v\n", throughput.Hour)
// Output:
// queues=[default]
// counters={Pending:0 Active:0 Scheduled:0 Retry:0 Archived:0 Processed:1 Failed:0 Paused:0 AvgWait:0s AvgRun:12ms}
// hour={Processed:1 Failed:0}

SnapshotQueue returns driver-native stats, falling back to collector data.
q, _ := queue.NewSync()
snapshot, _ := queue.SnapshotQueue(context.Background(), q, nil)
_, ok := snapshot.Queue("default")
fmt.Println(ok)
// Output: true

SupportsNativeStats reports whether a queue runtime exposes native stats snapshots.
q, _ := queue.NewSync()
fmt.Println(queue.SupportsNativeStats(q))
// Output: true

SupportsPause reports whether a queue runtime supports PauseQueue/ResumeQueue.
q, _ := queue.NewSync()
fmt.Println(queue.SupportsPause(q))
// Output: true

Throughput returns rolling throughput windows for a queue name.
collector := queue.NewStatsCollector()
collector.Observe(queue.Event{
Kind: queue.EventProcessSucceeded,
Driver: queue.DriverSync,
Queue: "default",
Time: time.Now(),
})
snapshot := collector.Snapshot()
throughput, ok := snapshot.Throughput("default")
fmt.Printf("ok=%v hour=%+v day=%+v week=%+v\n", ok, throughput.Hour, throughput.Day, throughput.Week)
// Output: ok=true hour={Processed:1 Failed:0} day={Processed:1 Failed:0} week={Processed:1 Failed:0}

Dispatch submits a typed job payload using the default queue.
q, err := queue.NewSync()
if err != nil {
return
}
type EmailPayload struct {
ID int `json:"id"`
}
q.Register("emails:send", func(ctx context.Context, task queue.Task) error {
var payload EmailPayload
if err := task.Bind(&payload); err != nil {
return err
}
return nil
})
task := queue.NewTask("emails:send").
Payload(EmailPayload{ID: 1}).
OnQueue("default").
Delay(10 * time.Millisecond)
_ = q.Dispatch(task)

Driver returns the active queue driver.
q, err := queue.NewSync()
if err != nil {
return
}
driverAware, ok := q.(interface{ Driver() queue.Driver })
if !ok {
return
}
fmt.Println(driverAware.Driver())
// Output: sync

Register associates a handler with a task type.
q, err := queue.NewSync()
if err != nil {
return
}
type EmailPayload struct {
ID int `json:"id"`
}
q.Register("emails:send", func(ctx context.Context, task queue.Task) error {
var payload EmailPayload
if err := task.Bind(&payload); err != nil {
return err
}
return nil
})

Shutdown drains running work and releases resources.
q, err := queue.NewWorkerpool()
if err != nil {
return
}

StartWorkers starts worker execution.
q, err := queue.NewWorkerpool()
if err != nil {
return
}

Backoff sets delay between retries.
task := queue.NewTask("emails:send").Backoff(500 * time.Millisecond)

Bind unmarshals task payload JSON into dst.
type EmailPayload struct {
ID int `json:"id"`
}
task := queue.NewTask("emails:send").Payload(EmailPayload{ID: 1})
var payload EmailPayload
_ = task.Bind(&payload)

Delay defers execution by duration.
task := queue.NewTask("emails:send").Delay(300 * time.Millisecond)

NewTask creates a task value with a required task type.
task := queue.NewTask("emails:send")

OnQueue sets the target queue name.
task := queue.NewTask("emails:send").OnQueue("critical")

Payload sets task payload from common value types.
Example: payload bytes
taskBytes := queue.NewTask("emails:send").Payload([]byte(`{"id":1}`))

Example: payload struct
type Meta struct {
Nested bool `json:"nested"`
}
type EmailPayload struct {
ID int `json:"id"`
To string `json:"to"`
Meta Meta `json:"meta"`
}
taskStruct := queue.NewTask("emails:send").Payload(EmailPayload{
ID: 1,
To: "user@example.com",
Meta: Meta{Nested: true},
})

Example: payload map
taskMap := queue.NewTask("emails:send").Payload(map[string]any{
"id": 1,
"to": "user@example.com",
"meta": map[string]any{"nested": true},
})

PayloadBytes returns a copy of task payload bytes.
task := queue.NewTask("emails:send").Payload([]byte(`{"id":1}`))
payload := task.PayloadBytes()

PayloadJSON marshals payload as JSON.
task := queue.NewTask("emails:send").PayloadJSON(map[string]int{"id": 1})

Retry sets max retry attempts.
Example: retry
task := queue.NewTask("emails:send").Retry(4)

Example: retry count getter
snapshot := queue.StatsSnapshot{
ByQueue: map[string]queue.QueueCounters{
"default": {Retry: 1},
},
}
fmt.Println(snapshot.Retry("default"))
// Output: 1

Timeout sets per-task execution timeout.
task := queue.NewTask("emails:send").Timeout(10 * time.Second)

UniqueFor enables uniqueness dedupe within the given TTL.
task := queue.NewTask("emails:send").UniqueFor(45 * time.Second)

AssertCount fails when dispatch count is not expected.
fake := queue.NewFake()
fake.AssertCount(nil, 1)

AssertDispatched fails when taskType was not dispatched.
fake := queue.NewFake()
fake.AssertDispatched(nil, "emails:send")AssertDispatchedOn fails when taskType was not dispatched on queueName.
fake := queue.NewFake()
queue.NewTask("emails:send").
OnQueue("critical"),
)
fake.AssertDispatchedOn(nil, "critical", "emails:send")

AssertDispatchedTimes fails when taskType dispatch count does not match expected.
fake := queue.NewFake()
fake.AssertDispatchedTimes(nil, "emails:send", 2)

AssertNotDispatched fails when taskType was dispatched.
fake := queue.NewFake()
fake.AssertNotDispatched(nil, "emails:cancel")

AssertNothingDispatched fails when any dispatch was recorded.
fake := queue.NewFake()
fake.AssertNothingDispatched(nil)

DispatchCtx submits a typed job payload using the provided context.
fake := queue.NewFake()
ctx := context.Background()
err := fake.DispatchCtx(ctx, queue.NewTask("emails:send").OnQueue("default"))
fmt.Println(err == nil)
// Output: true

NewFake creates a queue fake that records dispatches and provides assertions.
fake := queue.NewFake()
_ = fake.Dispatch(
queue.NewTask("emails:send").
Payload(map[string]any{"id": 1}).
OnQueue("critical"),
)
records := fake.Records()
fmt.Println(len(records), records[0].Queue, records[0].Task.Type)
// Output: 1 critical emails:send

Records returns a copy of all dispatch records.
fake := queue.NewFake()
_ = fake.Dispatch(queue.NewTask("emails:send"))
records := fake.Records()
fmt.Println(len(records), records[0].Task.Type)
// Output: 1 emails:send

Reset clears all recorded dispatches.
fake := queue.NewFake()
_ = fake.Dispatch(queue.NewTask("emails:send"))
fmt.Println(len(fake.Records()))
fake.Reset()
fmt.Println(len(fake.Records()))
// Output:
// 1
// 0

Workers sets desired worker concurrency before StartWorkers.
fake := queue.NewFake()
q := fake.Workers(4)
fmt.Println(q != nil)
// Output: true