10 changes: 2 additions & 8 deletions .github/workflows/test.yml
@@ -78,16 +78,10 @@ jobs:
       run: nimble install -y

     - name: Compile examples (asyncdispatch)
-      run: |
-        nim c examples/basic_query.nim
-        nim c examples/pool.nim
-        nim c examples/listen_notify.nim
+      run: for f in examples/*.nim; do nim c "$f"; done

     - name: Compile examples (chronos)
-      run: |
-        nim c -d:asyncBackend=chronos examples/basic_query.nim
-        nim c -d:asyncBackend=chronos examples/pool.nim
-        nim c -d:asyncBackend=chronos examples/listen_notify.nim
+      run: for f in examples/*.nim; do nim c -d:asyncBackend=chronos "$f"; done

     - name: Gen docs
       run: |
3 changes: 3 additions & 0 deletions .gitignore
@@ -10,3 +10,6 @@ async_postgres/*

 tests/*
 !tests/*.*
+
+examples/*
+!examples/*.*
10 changes: 9 additions & 1 deletion README.md
@@ -105,8 +105,16 @@ SSL backend differs by async backend:
 The [examples](examples/) directory contains runnable samples:

 - [basic_query](examples/basic_query.nim) — Connect, insert, and query rows
+- [prepared_statement](examples/prepared_statement.nim) — Server-side prepared statements
+- [transaction](examples/transaction.nim) — Transaction control with rollback and isolation levels
+- [cursor](examples/cursor.nim) — Server-side cursors for streaming large result sets
+- [pipeline](examples/pipeline.nim) — Batch multiple operations in a single round trip
+- [copy](examples/copy.nim) — Bulk import/export with the COPY protocol
+- [large_object](examples/large_object.nim) — Large Object API for binary data
-- [listen_notify](examples/listen_notify.nim) — LISTEN/NOTIFY
 - [pool](examples/pool.nim) — Connection pooling
+- [listen_notify](examples/listen_notify.nim) — LISTEN/NOTIFY with auto-reconnect
+- [pool_cluster](examples/pool_cluster.nim) — Read/write splitting with a pool cluster
+- [advisory_lock](examples/advisory_lock.nim) — Application-level distributed locking

 ## Documentation
44 changes: 44 additions & 0 deletions examples/advisory_lock.nim
@@ -0,0 +1,44 @@
## Advisory lock example.
##
## Demonstrates PostgreSQL advisory locks for application-level
## distributed locking, including try-lock and scoped locking.
##
## Usage:
##   nim c -r examples/advisory_lock.nim

import pkg/async_postgres

const Dsn = "postgresql://test:test@127.0.0.1:15432/test?sslmode=disable"

proc main() {.async.} =
  let conn = await connect(Dsn)
  defer:
    await conn.close()

  # Acquire and release an exclusive advisory lock (session-level)
  let lockKey = 42'i64
  await conn.advisoryLock(lockKey)
  echo "Acquired advisory lock: key=", lockKey

  discard await conn.advisoryUnlock(lockKey)
  echo "Released advisory lock: key=", lockKey

  # Try-lock: non-blocking acquisition
  let acquired = await conn.advisoryTryLock(100'i64)
  echo "\nTry-lock key=100: acquired=", acquired
  if acquired:
    discard await conn.advisoryUnlock(100'i64)

  # Two-key variant (int32, int32)
  await conn.advisoryLock(1'i32, 2'i32)
  echo "\nAcquired two-key lock: (1, 2)"
  discard await conn.advisoryUnlock(1'i32, 2'i32)
  echo "Released two-key lock: (1, 2)"

  # Scoped locking with the withAdvisoryLock macro
  conn.withAdvisoryLock(200'i64):
    echo "\nInside withAdvisoryLock (key=200)"
    # Lock is automatically released when the block exits
  echo "Lock released after withAdvisoryLock"

waitFor main()
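
A common composition of the calls above, sketched here rather than taken from the example: guard a run-once job with the non-blocking try-lock so that a second worker skips instead of waiting. Only advisoryTryLock and advisoryUnlock from this file are assumed; the key and the job body are hypothetical placeholders.

  # Sketch, assuming the advisoryTryLock/advisoryUnlock API shown above.
  const jobKey = 7001'i64   # hypothetical lock key for the job
  if await conn.advisoryTryLock(jobKey):
    try:
      echo "running the job while holding key=", jobKey   # placeholder work
    finally:
      discard await conn.advisoryUnlock(jobKey)
  else:
    echo "another worker holds key=", jobKey, "; skipping"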
63 changes: 63 additions & 0 deletions examples/copy.nim
@@ -0,0 +1,63 @@
## COPY protocol example.
##
## Demonstrates bulk data import/export using PostgreSQL's COPY protocol,
## including both buffered and streaming variants.
##
## Usage:
##   nim c -r examples/copy.nim

import pkg/async_postgres

const Dsn = "postgresql://test:test@127.0.0.1:15432/test?sslmode=disable"

proc main() {.async.} =
  let conn = await connect(Dsn)
  defer:
    await conn.close()

  discard await conn.exec(
    """
    CREATE TEMP TABLE items (
      id int NOT NULL,
      name text NOT NULL
    )
    """
  )

  # copyIn: bulk insert with string data
  let tag = await conn.copyIn("COPY items FROM STDIN", "1\tAlice\n2\tBob\n3\tCharlie\n")
  echo "copyIn result: ", tag.commandTag

  # copyOut: bulk export
  let result = await conn.copyOut("COPY items TO STDOUT")
  echo "\ncopyOut rows:"
  for chunk in result.data:
    echo " ", chunk.toString()

  # copyInStream: streaming bulk insert
  discard await conn.exec("TRUNCATE items")

  var idx = 0
  let rows = @["10\tDave\n".toBytes(), "20\tEve\n".toBytes()]
  let inCb = makeCopyInCallback:
    if idx < rows.len:
      let chunk = rows[idx]
      inc idx
      chunk
    else:
      newSeq[byte]()   # an empty chunk signals end of input here

  let inInfo = await conn.copyInStream("COPY items FROM STDIN", inCb)
  echo "\ncopyInStream result: ", inInfo.commandTag

  # copyOutStream: streaming bulk export
  var chunks: seq[seq[byte]]
  let outCb = makeCopyOutCallback:
    chunks.add(data)

  let outInfo = await conn.copyOutStream("COPY items TO STDOUT", outCb)
  echo "\ncopyOutStream rows (", outInfo.commandTag, "):"
  for chunk in chunks:
    echo " ", chunk.toString()

waitFor main()
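
The tab-separated payload passed to copyIn above can also be assembled from structured data rather than a literal. A minimal sketch, assuming only the copyIn call and the items table from this example; the people data is made up for illustration:

  # Sketch: build a text-format COPY payload (tab-separated, newline-terminated).
  let people = @[(4, "Dave"), (5, "Eve")]
  var payload = ""
  for (id, name) in people:
    payload.add($id & "\t" & name & "\n")
  discard await conn.copyIn("COPY items FROM STDIN", payload)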
48 changes: 48 additions & 0 deletions examples/cursor.nim
@@ -0,0 +1,48 @@
## Server-side cursor example.
##
## Demonstrates streaming large result sets using server-side cursors
## with chunk-based fetching.
##
## Usage:
##   nim c -r examples/cursor.nim

import pkg/async_postgres

const Dsn = "postgresql://test:test@127.0.0.1:15432/test?sslmode=disable"

proc main() {.async.} =
  let conn = await connect(Dsn)
  defer:
    await conn.close()

  discard await conn.exec(
    """
    CREATE TEMP TABLE numbers (
      id serial PRIMARY KEY,
      value int4 NOT NULL
    )
    """
  )

  # Insert sample data
  for i in 1'i32 .. 25:
    discard await conn.exec(sql"INSERT INTO numbers (value) VALUES ({i})")

  # Open a cursor with a chunk size of 10 rows
  let cursor =
    await conn.openCursor("SELECT id, value FROM numbers ORDER BY id", chunkSize = 10)

  # Fetch rows in chunks until exhausted
  var total = 0
  while not cursor.exhausted:
    let rows = await cursor.fetchNext()
    echo "Fetched ", rows.len, " rows:"
    for row in rows:
      echo " id=", row.getInt("id"), " value=", row.getInt("value")
    total += rows.len

  echo "\nTotal rows fetched: ", total

  await cursor.close()

waitFor main()
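
chunkSize trades round trips for client memory: each fetchNext returns at most chunkSize rows, so larger chunks mean fewer server round trips at the cost of more rows buffered at once. A sketch of the same loop tuned for a genuinely large table, reusing only the openCursor/fetchNext/close API shown above; the chunk size of 1000 is an arbitrary illustration:

  # Sketch: bigger chunks, fewer round trips; the full set never lives in memory.
  let big = await conn.openCursor("SELECT id, value FROM numbers ORDER BY id",
                                  chunkSize = 1000)
  while not big.exhausted:
    for row in await big.fetchNext():
      discard row   # process one row at a time here
  await big.close()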
53 changes: 53 additions & 0 deletions examples/large_object.nim
@@ -0,0 +1,53 @@
## Large Object example.
##
## Demonstrates storing and retrieving binary data using PostgreSQL's
## Large Object API. All operations must be within a transaction.
##
## Usage:
##   nim c -r examples/large_object.nim

import pkg/async_postgres

const Dsn = "postgresql://test:test@127.0.0.1:15432/test?sslmode=disable"

proc main() {.async.} =
  let conn = await connect(Dsn)
  defer:
    await conn.close()

  var oid: Oid

  # Create and write a Large Object (must be inside a transaction)
  conn.withTransaction:
    oid = await conn.loCreate()
    echo "Created Large Object: oid=", oid

    conn.withLargeObject(lo, oid, INV_READWRITE):
      let data = "Hello, Large Object!".toBytes()
      let written = await lo.loWrite(data)
      echo "Wrote ", written, " bytes"

  # Read it back
  conn.withTransaction:
    conn.withLargeObject(lo, oid, INV_READ):
      let content = await lo.loReadAll()
      echo "Read back: ", content.toString()

      # Check size
      let size = await lo.loSize()
      echo "Size: ", size, " bytes"

  # Streaming read
  conn.withTransaction:
    conn.withLargeObject(lo, oid, INV_READ):
      echo "\nStreaming read:"
      let cb = makeLoReadCallback:
        echo " chunk (", data.len, " bytes): ", data.toString()
      await lo.loReadStream(cb, chunkSize = 10)

  # Clean up
  conn.withTransaction:
    await conn.loUnlink(oid)
    echo "\nDeleted Large Object: oid=", oid

waitFor main()
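
The payload need not be a string literal; anything that yields bytes works. A sketch storing a local file's contents in the object created above, assuming only the withTransaction/withLargeObject/loWrite calls and the string toBytes helper from this example; the file path is hypothetical:

  # Sketch: write a file's bytes into the Large Object (path is hypothetical).
  conn.withTransaction:
    conn.withLargeObject(lo, oid, INV_READWRITE):
      let blob = readFile("logo.png").toBytes()
      discard await lo.loWrite(blob)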
56 changes: 56 additions & 0 deletions examples/pipeline.nim
@@ -0,0 +1,56 @@
## Pipeline example.
##
## Demonstrates batching multiple queries and commands into a single
## round trip using the pipeline API for improved performance.
##
## Usage:
##   nim c -r examples/pipeline.nim

import std/strutils
import pkg/async_postgres

const Dsn = "postgresql://test:test@127.0.0.1:15432/test?sslmode=disable"

proc main() {.async.} =
  let conn = await connect(Dsn)
  defer:
    await conn.close()

  discard await conn.exec(
    """
    CREATE TEMP TABLE tasks (
      id serial PRIMARY KEY,
      title text NOT NULL,
      done bool NOT NULL DEFAULT false
    )
    """
  )

  # Build a pipeline: multiple operations sent in a single round trip
  let p = conn.newPipeline()

  p.addExec("INSERT INTO tasks (title) VALUES ($1)", @[toPgParam("Write docs")])
  p.addExec("INSERT INTO tasks (title) VALUES ($1)", @[toPgParam("Fix bug")])
  p.addExec(
    "INSERT INTO tasks (title, done) VALUES ($1, $2)",
    @[toPgParam("Ship v1"), toPgParam(true)],
  )
  p.addQuery("SELECT id, title, done FROM tasks ORDER BY id")
  p.addQuery("SELECT count(*) FROM tasks WHERE done = true")

  # Execute all operations at once
  let results = await p.execute()

  for i, r in results:
    case r.kind
    of prkExec:
      echo "Operation ", i, ": ", r.commandResult.commandTag
    of prkQuery:
      echo "Operation ", i, " (", r.queryResult.rowCount, " rows):"
      for row in r.queryResult.rows:
        var cols: seq[string]
        for j in 0 ..< r.queryResult.fields.len:
          cols.add(r.queryResult.fields[j].name & "=" & row.getStr(j))
        echo " ", cols.join(", ")

waitFor main()
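
For contrast, issuing those statements one by one costs a round trip each. A sketch of the non-pipelined equivalent of the inserts, using the sql-interpolation form of exec seen in cursor.nim:

  # Sketch: one round trip per statement, versus one for the whole pipeline.
  for title in ["Write docs", "Fix bug", "Ship v1"]:
    discard await conn.exec(sql"INSERT INTO tasks (title) VALUES ({title})")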
57 changes: 57 additions & 0 deletions examples/pool_cluster.nim
@@ -0,0 +1,57 @@
## Pool cluster example.
##
## Demonstrates read/write splitting using a pool cluster with
## primary and replica configurations. In this example, both pools
## point to the same server for simplicity.
##
## Usage:
##   nim c -r examples/pool_cluster.nim

import pkg/async_postgres

proc main() {.async.} =
  let connConfig = ConnConfig(
    host: "127.0.0.1",
    port: 15432,
    user: "test",
    password: "test",
    database: "test",
    sslMode: sslDisable,
  )

  let primaryConfig = PoolConfig(connConfig: connConfig, minSize: 1, maxSize: 3)
  let replicaConfig = PoolConfig(connConfig: connConfig, minSize: 1, maxSize: 3)

  # fallbackPrimary: if replica is unavailable, fall back to primary for reads
  let cluster =
    await newPoolCluster(primaryConfig, replicaConfig, fallback = fallbackPrimary)
  defer:
    await cluster.close()

  # Write operations go to the primary
  discard await cluster.writeExec(
    "CREATE TABLE IF NOT EXISTS messages (id serial PRIMARY KEY, body text NOT NULL)"
  )
  discard await cluster.writeExec("TRUNCATE messages")
  discard
    await cluster.writeExec("INSERT INTO messages (body) VALUES ('hello'), ('world')")

  # Read operations go to the replica (or primary as fallback)
  let res = await cluster.readQuery("SELECT id, body FROM messages ORDER BY id")
  echo "Messages:"
  for row in res.rows:
    echo " id=", row.getInt("id"), " body=", row.getStr("body")

  # Using withWriteConnection / withReadConnection for multi-statement operations
  cluster.withWriteConnection(conn):
    discard
      await conn.exec("INSERT INTO messages (body) VALUES ('from withWriteConnection')")

  cluster.withReadConnection(readConn):
    let count = await readConn.queryValue(int64, "SELECT count(*) FROM messages")
    echo "\nTotal messages: ", count

  # Cleanup
  discard await cluster.writeExec("DROP TABLE messages")

waitFor main()