Compare commits


No commits in common. "328cc635364a8a733698f341d687a16e6ff59204" and "137422601b10286ec364e3be588424420139c2bf" have entirely different histories.

3 changed files with 243 additions and 262 deletions


@@ -25,13 +25,13 @@ TODO: Write the documentation in more detail here.
## Benchmarks
Performance is generally on par with [postgres.js][1] and up to **5x faster** than [deno-postgres][2]. Keep in mind that database driver benchmarks depend largely on the performance of the database itself and do not necessarily represent accurate real-world performance.
Performance is generally on par with [postgres-js][1] and up to **5x faster** than [deno-postgres][2]. Keep in mind that database driver benchmarks depend largely on the performance of the database itself and do not necessarily represent accurate real-world performance.
Tested on a 4-core, 2800 MHz, x86_64-pc-linux-gnu QEMU VM, with Deno 2.1.4 and PostgreSQL 17.1 on localhost:
Query `select * from pg_type`:
```
```log
CPU | Common KVM Processor v2.0
Runtime | Deno 2.1.4 (x86_64-unknown-linux-gnu)
@@ -39,78 +39,78 @@ benchmark time/iter (avg) iter/s (min … max) p75
--------------- ----------------------------- --------------------- --------------------------
group select n=1
pglue 8.8 ms 113.8 ( 7.2 ms … 11.8 ms) 9.7 ms 11.8 ms 11.8 ms
postgres.js 10.8 ms 92.3 ( 8.1 ms … 22.0 ms) 11.2 ms 22.0 ms 22.0 ms
deno-postgres 38.9 ms 25.7 ( 23.5 ms … 51.9 ms) 40.3 ms 51.9 ms 51.9 ms
pglue 8.3 ms 120.4 ( 7.2 ms … 14.4 ms) 8.5 ms 14.4 ms 14.4 ms
postgres-js 10.8 ms 92.3 ( 8.1 ms … 26.5 ms) 10.7 ms 26.5 ms 26.5 ms
deno-postgres 37.1 ms 26.9 ( 33.4 ms … 41.3 ms) 38.5 ms 41.3 ms 41.3 ms
summary
pglue
1.23x faster than postgres.js
4.42x faster than deno-postgres
1.30x faster than postgres-js
4.47x faster than deno-postgres
group select n=5
pglue 40.1 ms 25.0 ( 36.1 ms … 48.2 ms) 40.7 ms 48.2 ms 48.2 ms
postgres.js 48.7 ms 20.5 ( 38.9 ms … 61.2 ms) 52.7 ms 61.2 ms 61.2 ms
deno-postgres 184.7 ms 5.4 (166.5 ms … 209.5 ms) 190.7 ms 209.5 ms 209.5 ms
pglue 39.9 ms 25.1 ( 37.2 ms … 49.6 ms) 40.8 ms 49.6 ms 49.6 ms
postgres-js 42.4 ms 23.6 ( 36.5 ms … 61.8 ms) 44.2 ms 61.8 ms 61.8 ms
deno-postgres 182.5 ms 5.5 (131.9 ms … 211.8 ms) 193.4 ms 211.8 ms 211.8 ms
summary
pglue
1.22x faster than postgres.js
4.61x faster than deno-postgres
1.06x faster than postgres-js
4.57x faster than deno-postgres
group select n=10
pglue 80.7 ms 12.4 ( 73.5 ms … 95.4 ms) 82.2 ms 95.4 ms 95.4 ms
postgres.js 89.1 ms 11.2 ( 82.5 ms … 101.7 ms) 94.4 ms 101.7 ms 101.7 ms
deno-postgres 375.3 ms 2.7 (327.4 ms … 393.9 ms) 390.7 ms 393.9 ms 393.9 ms
pglue 78.9 ms 12.7 ( 72.3 ms … 88.9 ms) 82.5 ms 88.9 ms 88.9 ms
postgres-js 92.0 ms 10.9 ( 77.6 ms … 113.6 ms) 101.2 ms 113.6 ms 113.6 ms
deno-postgres 326.6 ms 3.1 (208.8 ms … 406.0 ms) 388.8 ms 406.0 ms 406.0 ms
summary
pglue
1.10x faster than postgres.js
4.65x faster than deno-postgres
1.17x faster than postgres-js
4.14x faster than deno-postgres
```
Query `insert into my_table (a, b, c) values (${a}, ${b}, ${c})`:
```
```log
group insert n=1
pglue 259.2 µs 3,858 (165.4 µs … 2.8 ms) 258.0 µs 775.4 µs 2.8 ms
postgres.js 235.9 µs 4,239 (148.8 µs … 1.2 ms) 250.3 µs 577.4 µs 585.6 µs
deno-postgres 306.7 µs 3,260 (198.8 µs … 1.3 ms) 325.9 µs 1.0 ms 1.3 ms
pglue 303.3 µs 3,297 (165.6 µs … 2.4 ms) 321.6 µs 1.1 ms 2.4 ms
postgres-js 260.4 µs 3,840 (132.9 µs … 2.7 ms) 276.4 µs 1.1 ms 2.7 ms
deno-postgres 281.6 µs 3,552 (186.1 µs … 1.5 ms) 303.8 µs 613.6 µs 791.8 µs
summary
pglue
1.10x slower than postgres.js
1.18x faster than deno-postgres
1.17x slower than postgres-js
1.08x slower than deno-postgres
group insert n=10
pglue 789.7 µs 1,266 (553.2 µs … 2.7 ms) 783.4 µs 2.4 ms 2.7 ms
postgres.js 755.6 µs 1,323 (500.5 µs … 3.4 ms) 795.0 µs 2.8 ms 3.4 ms
deno-postgres 2.2 ms 458.1 ( 1.6 ms … 5.2 ms) 2.3 ms 4.8 ms 5.2 ms
pglue 1.1 ms 878.5 (605.5 µs … 3.2 ms) 1.1 ms 2.2 ms 3.2 ms
postgres-js 849.3 µs 1,177 (529.5 µs … 10.1 ms) 770.6 µs 3.0 ms 10.1 ms
deno-postgres 2.3 ms 439.4 ( 1.4 ms … 4.9 ms) 2.5 ms 4.1 ms 4.9 ms
summary
pglue
1.04x slower than postgres.js
2.76x faster than deno-postgres
1.34x slower than postgres-js
2.00x faster than deno-postgres
group insert n=100
pglue 5.8 ms 172.0 ( 3.2 ms … 9.9 ms) 6.8 ms 9.9 ms 9.9 ms
postgres.js 13.0 ms 76.8 ( 8.6 ms … 20.8 ms) 15.4 ms 20.8 ms 20.8 ms
deno-postgres 18.5 ms 54.1 ( 14.3 ms … 32.1 ms) 20.0 ms 32.1 ms 32.1 ms
pglue 8.3 ms 121.0 ( 5.0 ms … 13.6 ms) 9.3 ms 13.6 ms 13.6 ms
postgres-js 13.0 ms 76.7 ( 9.0 ms … 26.9 ms) 14.1 ms 26.9 ms 26.9 ms
deno-postgres 19.8 ms 50.5 ( 14.2 ms … 31.8 ms) 22.5 ms 31.8 ms 31.8 ms
summary
pglue
2.24x faster than postgres.js
3.18x faster than deno-postgres
1.58x faster than postgres-js
2.40x faster than deno-postgres
group insert n=200
pglue 8.8 ms 113.4 ( 6.0 ms … 14.1 ms) 10.0 ms 14.1 ms 14.1 ms
postgres.js 28.2 ms 35.5 ( 21.1 ms … 47.0 ms) 29.6 ms 47.0 ms 47.0 ms
deno-postgres 37.0 ms 27.0 ( 32.0 ms … 48.1 ms) 39.4 ms 48.1 ms 48.1 ms
pglue 15.1 ms 66.2 ( 9.4 ms … 21.1 ms) 16.8 ms 21.1 ms 21.1 ms
postgres-js 27.8 ms 36.0 ( 22.5 ms … 39.2 ms) 30.2 ms 39.2 ms 39.2 ms
deno-postgres 40.6 ms 24.6 ( 33.5 ms … 51.4 ms) 42.2 ms 51.4 ms 51.4 ms
summary
pglue
3.20x faster than postgres.js
4.20x faster than deno-postgres
1.84x faster than postgres-js
2.68x faster than deno-postgres
```
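For context, the insert above is a tagged template, so `${a}`, `${b}`, and `${c}` are sent as bind parameters rather than interpolated into the SQL text. A minimal sketch of that query written against postgres.js ([1]); the table name and connection URL are placeholders, not part of this repository:

```ts
// Illustrative sketch only: the benchmarked insert expressed with postgres.js ([1]).
// "my_table" and the connection URL are placeholders.
import postgres from "npm:postgres";

const sql = postgres("postgres://postgres:postgres@localhost:5432/postgres");
const [a, b, c] = ["foo", 42, true];
// The template values are passed to the server as bind parameters.
await sql`insert into my_table (a, b, c) values (${a}, ${b}, ${c})`;
await sql.end();
```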
[1]: https://github.com/porsager/postgres


@@ -60,7 +60,7 @@ for (const n of [1, 5, 10]) {
});
Deno.bench({
name: `postgres.js`,
name: `postgres-js`,
group: `select n=${n}`,
async fn(b) {
await bench_select(b, n, () => c_pgjs`select * from pg_type`);
@@ -95,7 +95,7 @@ for (const n of [1, 10, 100, 200]) {
});
Deno.bench({
name: `postgres.js`,
name: `postgres-js`,
group: `insert n=${n}`,
async fn(b) {
await c_pgjs`begin`;
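The benchmark file relies on `Deno.bench` groups, which is what produces the per-group summary lines ("1.30x faster than …") in the logs above. A minimal self-contained sketch of that pattern; the workloads are placeholders, and marking pglue as the explicit baseline is an assumption, not something shown in this diff:

```ts
// Minimal sketch of Deno.bench grouping; the benched bodies are stand-in workloads,
// not pglue or postgres-js code.
for (const n of [1, 5, 10]) {
  Deno.bench({
    name: "pglue",
    group: `select n=${n}`,
    baseline: true, // group summaries are reported relative to the baseline (assumed here)
    async fn() {
      await new Promise((resolve) => setTimeout(resolve, n)); // placeholder workload
    },
  });
  Deno.bench({
    name: "postgres-js",
    group: `select n=${n}`,
    async fn() {
      await new Promise((resolve) => setTimeout(resolve, 2 * n)); // placeholder workload
    },
  });
}
```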

wire.ts

@@ -1,6 +1,5 @@
import {
type BinaryLike,
buf_concat,
buf_concat_fast,
buf_eq,
buf_xor,
@@ -8,9 +7,8 @@ import {
from_base64,
from_utf8,
jit,
type Receiver,
semaphore,
type Sender,
semaphore_fast,
to_base64,
to_utf8,
TypedEmitter,
@@ -91,7 +89,7 @@ export class PostgresError extends WireError {
}
}
function severity_log_level(s: string): LogLevel {
function severity_level(s: string): LogLevel {
switch (s) {
case "DEBUG":
return "debug";
@@ -478,38 +476,57 @@ export interface Channel
}
export async function wire_connect(options: WireOptions) {
const wire = new Wire(options);
return await wire.connect(), wire;
const { host, port } = options;
const wire = new Wire(await socket_connect(host, port), options);
return await wire.connected, wire;
}
async function socket_connect(hostname: string, port: number) {
if (hostname.startsWith("/")) {
const path = join(hostname, `.s.PGSQL.${port}`);
return await Deno.connect({ transport: "unix", path });
} else {
const socket = await Deno.connect({ transport: "tcp", hostname, port });
return socket.setNoDelay(), socket.setKeepAlive(), socket;
}
}
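// Example of the branch above: for a unix-socket host such as "/var/run/postgresql"
// with port 5432, the connection path becomes "/var/run/postgresql/.s.PGSQL.5432"
// (PostgreSQL's default socket file name); hosts not starting with "/" connect over
// TCP with TCP_NODELAY and keep-alive enabled.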
export class Wire extends TypedEmitter<WireEvents> implements Disposable {
readonly #socket;
readonly #params;
readonly #connect;
readonly #auth;
readonly #connected;
readonly #query;
readonly #begin;
readonly #listen;
readonly #notify;
readonly #close;
get socket() {
return this.#socket;
}
get params() {
return this.#params;
}
constructor(options: WireOptions) {
get connected() {
return this.#connected;
}
constructor(socket: Deno.Conn, options: WireOptions) {
super();
({
params: this.#params,
connect: this.#connect,
auth: this.#auth,
query: this.#query,
begin: this.#begin,
listen: this.#listen,
notify: this.#notify,
close: this.#close,
} = wire_impl(this, options));
}
connect() {
return this.#connect();
} = wire_impl(this, socket, options));
this.#socket = socket;
(this.#connected = this.#auth()).catch(close);
}
query<T = Row>(sql: SqlFragment): Query<T>;
@@ -563,164 +580,112 @@ export class Wire extends TypedEmitter<WireEvents> implements Disposable {
}
}
async function socket_connect(hostname: string, port: number) {
if (hostname.startsWith("/")) {
const path = join(hostname, `.s.PGSQL.${port}`);
return await Deno.connect({ transport: "unix", path });
} else {
const socket = await Deno.connect({ transport: "tcp", hostname, port });
return socket.setNoDelay(), socket.setKeepAlive(), socket;
}
}
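// Message groups: encoding several protocol messages together (e.g. Parse+Describe
// or Bind+Execute+CopyDone+Close) lets each pipeline step be serialized into one
// buffer and written together instead of one write per message.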
const msg_PD = object({ P: Parse, D: Describe });
const msg_BE = object({ B: Bind, E: Execute });
const msg_BEc = object({ B: Bind, E: Execute, c: CopyDone });
const msg_BEcC = object({ B: Bind, E: Execute, c: CopyDone, C: Close });
function wire_impl(
wire: Wire,
{ host, port, user, database, password, runtime_params, types }: WireOptions
socket: Deno.Conn,
{ user, database, password, runtime_params, types }: WireOptions
) {
// current runtime parameters as reported by postgres
const params: Parameters = Object.create(null);
function log(level: LogLevel, ctx: object, msg: string) {
wire.emit("log", level, ctx, msg);
}
// wire supports re-connection; socket and read/write channels are null when closed
let connected = false;
let socket: Deno.Conn | null = null;
let read_pop: Receiver<Uint8Array> | null = null;
let write_push: Sender<Uint8Array> | null = null;
async function read<T>(type: Encoder<T>) {
const msg = read_pop !== null ? await read_pop() : null;
if (msg !== null) return ser_decode(type, msg_check_err(msg));
else throw new WireError(`connection closed`);
const msg = await read_recv();
if (msg === null) throw new WireError(`connection closed`);
else return ser_decode(type, msg_check_err(msg));
}
async function read_msg() {
const msg = read_pop !== null ? await read_pop() : null;
if (msg !== null) return msg;
else throw new WireError(`connection closed`);
async function read_raw() {
const msg = await read_recv();
if (msg === null) throw new WireError(`connection closed`);
else return msg;
}
async function read_socket(socket: Deno.Conn, push: Sender<Uint8Array>) {
let err;
async function* read_socket() {
const buf = new Uint8Array(64 * 1024);
for (let n; (n = await socket.read(buf)) !== null; )
yield buf.subarray(0, n);
}
const read_recv = channel.receiver<Uint8Array>(async function read(send) {
let err: unknown;
try {
const header_size = 5;
const read_buf = new Uint8Array(64 * 1024); // shared buffer for all socket reads
let buf = new Uint8Array(); // concatenated messages read so far
let buf = new Uint8Array();
for await (const chunk of read_socket()) {
buf = buf_concat_fast(buf, chunk);
for (let read; (read = await socket.read(read_buf)) !== null; ) {
buf = buf_concat_fast(buf, read_buf.subarray(0, read)); // push read bytes to buf
while (buf.length >= header_size) {
const size = ser_decode(Header, buf).length + 1;
if (buf.length < size) break;
const msg = buf.subarray(0, size); // shift one message from buf
buf = buf.subarray(size);
if (!handle_msg(msg)) push(msg);
for (let n; (n = ser_decode(Header, buf).length + 1) <= buf.length; ) {
const msg = buf.subarray(0, n);
buf = buf.subarray(n);
switch (msg_type(msg)) {
// https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-ASYNC
case NoticeResponse.type: {
const { fields } = ser_decode(NoticeResponse, msg);
const notice = new PostgresError(fields);
log(severity_level(notice.severity), notice, notice.message);
wire.emit("notice", notice);
continue;
}
case ParameterStatus.type: {
const { name, value } = ser_decode(ParameterStatus, msg);
const prev = params[name] ?? null;
Object.defineProperty(params, name, {
configurable: true,
enumerable: true,
value,
});
wire.emit("parameter", name, value, prev);
continue;
}
case NotificationResponse.type: {
const { channel, payload, process_id } = ser_decode(
NotificationResponse,
msg
);
wire.emit("notify", channel, payload, process_id);
channels.get(channel)?.emit("notify", payload, process_id);
continue;
}
}
send(msg);
}
}
// there should be nothing left in buf if we gracefully exited
if (buf.length !== 0) throw new WireError(`unexpected end of stream`);
} catch (e) {
throw (err = e);
throw ((err = e), e);
} finally {
if (connected) close(err);
wire.emit("close", err);
}
}
function handle_msg(msg: Uint8Array) {
switch (msg_type(msg)) {
// https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-ASYNC
case NoticeResponse.type: {
const { fields } = ser_decode(NoticeResponse, msg);
const notice = new PostgresError(fields);
log(severity_log_level(notice.severity), notice, notice.message);
wire.emit("notice", notice);
return true;
}
case ParameterStatus.type: {
const { name, value } = ser_decode(ParameterStatus, msg);
const prev = params[name] ?? null;
Object.defineProperty(params, name, {
configurable: true,
enumerable: true,
value,
});
wire.emit("parameter", name, value, prev);
return true;
}
case NotificationResponse.type: {
const { channel, payload, process_id } = ser_decode(
NotificationResponse,
msg
);
wire.emit("notify", channel, payload, process_id);
channels.get(channel)?.emit("notify", payload, process_id);
return true;
}
}
return false;
}
});
function write<T>(type: Encoder<T>, value: T) {
write_msg(ser_encode(type, value));
return write_raw(ser_encode(type, value));
}
function write_msg(buf: Uint8Array) {
if (write_push !== null) write_push(buf);
else throw new WireError(`connection closed`);
}
async function write_socket(socket: Deno.Conn, pop: Receiver<Uint8Array>) {
let err;
try {
for (let buf; (buf = await pop()) !== null; ) {
const bufs = [buf]; // proactively dequeue more queued msgs synchronously, if any
for (let i = 1, buf; (buf = pop.try()) !== null; ) bufs[i++] = buf;
if (bufs.length !== 1) buf = buf_concat(bufs); // write queued msgs concatenated, reduce write syscalls
for (let i = 0, n = buf.length; i < n; )
i += await socket.write(buf.subarray(i));
}
} catch (e) {
throw (err = e);
} finally {
if (connected) close(err);
}
}
async function connect() {
using _rlock = await rlock();
using _wlock = await wlock();
if (connected) return;
try {
const s = (socket = await socket_connect(host, port));
read_pop = channel.receiver((push) => read_socket(s, push));
write_push = channel.sender((pop) => write_socket(s, pop));
await handle_auth(); // run auth with rw lock
connected = true;
} catch (e) {
throw (close(e), e);
}
async function write_raw(buf: Uint8Array) {
for (let i = 0, n = buf.length; i < n; )
i += await socket.write(buf.subarray(i));
}
function close(reason?: unknown) {
socket?.close(), (socket = null);
read_pop?.close(reason), (read_pop = null);
write_push?.close(reason), (write_push = null);
for (const name of Object.keys(params))
delete (params as Record<string, string>)[name];
st_cache.clear(), (st_ids = 0);
(tx_status = "I"), (tx_stack.length = 0);
connected &&= (wire.emit("close", reason), false);
socket.close(), read_recv.close(reason);
}
// https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-PIPELINING
const rlock = semaphore();
const wlock = semaphore();
const rlock = semaphore_fast();
const wlock = semaphore_fast();
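// Writers queue on wlock and readers on rlock, so a new pipeline step can be written
// while the results of earlier steps are still being read; pipeline_write() ends each
// step with Sync and pipeline_read() drains messages until ReadyForQuery.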
function pipeline<T>(
w: () => void | PromiseLike<void>,
@@ -732,38 +697,39 @@ function wire_impl(
});
}
async function pipeline_read<T>(r: () => T | PromiseLike<T>) {
using _lock = await rlock();
try {
return await r();
} finally {
function pipeline_read<T>(r: () => T | PromiseLike<T>) {
return rlock(async function pipeline_read() {
try {
let msg;
while (msg_type((msg = await read_msg())) !== ReadyForQuery.type);
({ tx_status } = ser_decode(ReadyForQuery, msg));
} catch {
// ignored
return await r();
} finally {
try {
let msg;
while (msg_type((msg = await read_raw())) !== ReadyForQuery.type);
({ tx_status } = ser_decode(ReadyForQuery, msg));
} catch {
// ignored
}
}
}
});
}
async function pipeline_write<T>(w: () => T | PromiseLike<T>) {
using _lock = await wlock();
try {
return await w();
} finally {
function pipeline_write<T>(w: () => T | PromiseLike<T>) {
return wlock(async function pipeline_write() {
try {
write(Sync, {});
} catch {
// ignored
return await w();
} finally {
try {
await write(Sync, {});
} catch {
// ignored
}
}
}
});
}
// https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-START-UP
async function handle_auth() {
// always run within rw lock (see connect())
write(StartupMessage, {
async function auth() {
await write(StartupMessage, {
version: 196608,
params: {
application_name: "pglue",
@@ -778,7 +744,7 @@ });
});
auth: for (;;) {
const msg = msg_check_err(await read_msg());
const msg = msg_check_err(await read_raw());
switch (msg_type(msg)) {
case NegotiateProtocolVersion.type: {
const { bad_options } = ser_decode(NegotiateProtocolVersion, msg);
@@ -796,7 +762,7 @@
throw new WireError(`kerberos authentication is deprecated`);
case 3: // AuthenticationCleartextPassword
write(PasswordMessage, { password });
await write(PasswordMessage, { password });
continue;
case 5: // AuthenticationMD5Password
@@ -812,7 +778,7 @@
// AuthenticationSASL
case 10:
await handle_auth_sasl();
await auth_sasl();
continue;
default:
@@ -820,9 +786,8 @@
}
}
// wait for ready
ready: for (;;) {
const msg = msg_check_err(await read_msg());
const msg = msg_check_err(await read_raw());
switch (msg_type(msg)) {
case BackendKeyData.type:
continue; // ignored
@@ -832,18 +797,11 @@
break ready;
}
}
// re-listen previously registered channels
await Promise.all(
channels
.keys()
.map((name) => query(sql`listen ${sql.ident(name)}`).execute())
);
}
// https://www.postgresql.org/docs/current/sasl-authentication.html#SASL-SCRAM-SHA-256
// https://datatracker.ietf.org/doc/html/rfc5802
async function handle_auth_sasl() {
async function auth_sasl() {
const bits = 256;
const hash = `SHA-${bits}`;
const mechanism = `SCRAM-${hash}`;
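// SCRAM-SHA-256 in brief (RFC 5802): client and server exchange nonces and a salt,
// the password is stretched into salted_password via PBKDF2, and the client proves
// possession of it by sending client_key XOR HMAC(H(client_key), auth_message)
// without ever sending the password; the server in turn answers with
// server_signature = HMAC(server_key, auth_message).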
@@ -900,7 +858,7 @@
)}`;
const client_first_message_bare = `${username},${initial_nonce}`;
const client_first_message = `${gs2_header}${client_first_message_bare}`;
write(SASLInitialResponse, { mechanism, data: client_first_message });
await write(SASLInitialResponse, { mechanism, data: client_first_message });
const server_first_message_str = from_utf8(
(await read(AuthenticationSASLContinue)).data
@@ -919,7 +877,7 @@
const client_proof = buf_xor(client_key, client_signature);
const proof = `p=${to_base64(client_proof)}`;
const client_final_message = `${client_final_message_without_proof},${proof}`;
write(SASLResponse, { data: client_final_message });
await write(SASLResponse, { data: client_final_message });
const server_key = await hmac(salted_password, "Server Key");
const server_signature = await hmac(server_key, auth_message);
@@ -939,28 +897,29 @@
readonly name = `__st${st_ids++}`;
constructor(readonly query: string) {}
#parse_task: Promise<{
parse_task: Promise<{
ser_params: ParameterSerializer;
Row: RowConstructor;
}> | null = null;
parse() {
return (this.#parse_task ??= this.#parse());
return (this.parse_task ??= this.#parse());
}
async #parse() {
try {
const { name, query } = this;
return await pipeline(
() => {
write(Parse, { statement: name, query, param_types: [] });
write(Describe, { which: "S", name });
},
() =>
write(msg_PD, {
P: { statement: name, query, param_types: [] },
D: { which: "S", name },
}),
async () => {
await read(ParseComplete);
const ser_params = make_param_ser(await read(ParameterDescription));
const msg = msg_check_err(await read_msg());
const msg = msg_check_err(await read_raw());
const Row =
msg_type(msg) === NoData.type
? EmptyRow
@@ -970,13 +929,13 @@
}
);
} catch (e) {
throw ((this.#parse_task = null), e);
throw ((this.parse_task = null), e);
}
}
#portals = 0;
portals = 0;
portal() {
return `${this.name}_${this.#portals++}`;
return `${this.name}_${this.portals++}`;
}
}
@@ -985,7 +944,6 @@
(params: unknown[]): (string | null)[];
}
// makes function to serialize query parameters
function make_param_ser({ param_types }: ParameterDescription) {
return jit.compiled<ParameterSerializer>`function ser_params(xs) {
return [
@@ -1002,7 +960,6 @@
new (columns: (BinaryLike | null)[]): Row;
}
// makes function to create Row objects
const EmptyRow = make_row_ctor({ columns: [] });
function make_row_ctor({ columns }: RowDescription) {
const Row = jit.compiled<RowConstructor>`function Row(xs) {
@@ -1041,7 +998,7 @@
stdout: WritableStream<Uint8Array> | null
) {
for (let rows = [], i = 0; ; ) {
const msg = msg_check_err(await read_msg());
const msg = msg_check_err(await read_raw());
switch (msg_type(msg)) {
default:
case DataRow.type:
@@ -1077,7 +1034,7 @@
if (stream !== null) {
const writer = stream.getWriter();
try {
for (let msg; msg_type((msg = await read_msg())) !== CopyDone.type; ) {
for (let msg; msg_type((msg = await read_raw())) !== CopyDone.type; ) {
const { data } = ser_decode(CopyData, msg_check_err(msg));
await writer.write(to_utf8(data));
}
@@ -1089,7 +1046,7 @@
writer.releaseLock();
}
} else {
while (msg_type(msg_check_err(await read_msg())) !== CopyDone.type);
while (msg_type(msg_check_err(await read_raw())) !== CopyDone.type);
}
}
@@ -1098,16 +1055,16 @@
const reader = stream.getReader();
try {
for (let next; !(next = await reader.read()).done; )
write(CopyData, { data: next.value });
write(CopyDone, {});
await write(CopyData, { data: next.value });
await write(CopyDone, {});
} catch (e) {
write(CopyFail, { cause: String(e) });
await write(CopyFail, { cause: String(e) });
throw e;
} finally {
reader.releaseLock();
}
} else {
write(CopyDone, {});
await write(CopyDone, {});
}
}
@@ -1116,20 +1073,20 @@
stdin: ReadableStream<Uint8Array> | null,
stdout: WritableStream<Uint8Array> | null
): ResultStream<Row> {
yield* await pipeline(
() => {
log("debug", { query }, `executing simple query`);
write(QueryMessage, { query });
write_copy_in(stdin);
log("debug", { query: query }, `executing simple query`);
const { chunks, err } = await pipeline(
async () => {
await write(QueryMessage, { query });
return write_copy_in(stdin);
},
async () => {
for (let chunks = [], err; ; ) {
const msg = await read_msg();
const msg = await read_raw();
switch (msg_type(msg)) {
default:
case ReadyForQuery.type:
if (err) throw err;
else return chunks;
return { chunks, err };
case RowDescription.type: {
const Row = make_row_ctor(ser_decode(RowDescription, msg));
@@ -1153,6 +1110,8 @@
}
);
yield* chunks;
if (err) throw err;
return { tag: "" };
}
@@ -1162,7 +1121,12 @@
stdin: ReadableStream<Uint8Array> | null,
stdout: WritableStream<Uint8Array> | null
): ResultStream<Row> {
const { query, name: statement } = st;
log(
"debug",
{ query: st.query, statement: st.name, params },
`executing query`
);
const { ser_params, Row } = await st.parse();
const param_values = ser_params(params);
const portal = st.portal();
@@ -1170,17 +1134,23 @@
try {
const { rows, tag } = await pipeline(
async () => {
log("debug", { query, statement, params }, `executing query`);
write(Bind, {
const B = {
portal,
statement: st.name,
param_formats: [],
param_values,
column_formats: [],
});
write(Execute, { portal, row_limit: 0 });
await write_copy_in(stdin);
write(Close, { which: "P", name: portal });
};
const E = { portal, row_limit: 0 };
const C = { which: "P" as const, name: portal };
if (stdin !== null) {
await write(msg_BE, { B, E });
await write_copy_in(stdin);
return write(Close, C);
} else {
return write(msg_BEcC, { B, E, c: {}, C });
}
},
async () => {
await read(BindComplete);
@@ -1193,7 +1163,7 @@
} catch (e) {
try {
await pipeline(
() => write(Close, { which: "P", name: portal }),
() => write(Close, { which: "P" as const, name: portal }),
() => read(CloseComplete)
);
} catch {
@@ -1211,24 +1181,34 @@
stdin: ReadableStream<Uint8Array> | null,
stdout: WritableStream<Uint8Array> | null
): ResultStream<Row> {
const { query, name: statement } = st;
log(
"debug",
{ query: st.query, statement: st.name, params },
`executing chunked query`
);
const { ser_params, Row } = await st.parse();
const param_values = ser_params(params);
const portal = st.portal();
try {
let { done, rows, tag } = await pipeline(
() => {
log("debug", { query, statement, params }, `executing chunked query`);
write(Bind, {
async () => {
const B = {
portal,
statement: st.name,
param_formats: [],
param_values,
column_formats: [],
});
write(Execute, { portal, row_limit: chunk_size });
return write_copy_in(stdin);
};
const E = { portal, row_limit: chunk_size };
if (stdin !== null) {
await write(msg_BE, { B, E });
return write_copy_in(stdin);
} else {
return write(msg_BEc, { B, E, c: {} });
}
},
async () => {
await read(BindComplete);
@@ -1250,7 +1230,7 @@
return { tag };
} finally {
await pipeline(
() => write(Close, { which: "P", name: portal }),
() => write(Close, { which: "P" as const, name: portal }),
() => read(CloseComplete)
);
}
@@ -1378,7 +1358,7 @@
}
};
return { params, connect, query, begin, listen, notify, close };
return { params, auth, query, begin, listen, notify, close };
}
export interface PoolOptions extends WireOptions {
@@ -1537,8 +1517,9 @@
};
async function connect() {
const wire = new PoolWire(options);
await wire.connect(), all.add(wire);
const { host, port } = options;
const wire = new PoolWire(await socket_connect(host, port), options);
await wire.connected, all.add(wire);
const { connection_id } = wire;
return wire
.on("log", (l, c, s) => pool.emit("log", l, { ...c, connection_id }, s))