diff --git a/src/PgContractCache.ts b/src/PgContractCache.ts
index caf7fba..f6e5ccc 100644
--- a/src/PgContractCache.ts
+++ b/src/PgContractCache.ts
@@ -1,14 +1,13 @@
 import {
   BasicSortKeyCache,
   CacheKey,
-  CacheOptions,
   EvalStateResult,
   LoggerFactory,
   PruneStats,
   SortKeyCacheResult,
 } from "warp-contracts";
 import { Pool, PoolClient } from "pg";
-import { PgCacheOptions } from "./PgCacheOptions";
+import { PgContractCacheOptions } from "./PgContractCacheOptions";
 
 export class PgContractCache<V>
   implements BasicSortKeyCache<EvalStateResult<V>>
@@ -18,10 +17,7 @@ export class PgContractCache<V>
   private readonly pool: Pool;
   private client: PoolClient;
 
-  constructor(
-    private readonly cacheOptions: CacheOptions,
-    private readonly pgCacheOptions?: PgCacheOptions
-  ) {
+  constructor(private readonly pgCacheOptions?: PgContractCacheOptions) {
     if (!pgCacheOptions) {
       this.pgCacheOptions = {
         minEntriesPerContract: 10,
@@ -315,7 +311,7 @@ export class PgContractCache<V>
   }
 
   async rollback(): Promise<void> {
-    await this.client.query("BEGIN;");
+    await this.client.query("ROLLBACK;");
   }
 
   storage<S>(): S {
diff --git a/src/PgCacheOptions.ts b/src/PgContractCacheOptions.ts
similarity index 62%
rename from src/PgCacheOptions.ts
rename to src/PgContractCacheOptions.ts
index c7cccb9..ff5d65c 100644
--- a/src/PgCacheOptions.ts
+++ b/src/PgContractCacheOptions.ts
@@ -1,6 +1,6 @@
 import { ClientConfig } from "pg";
 
-export interface PgCacheOptions extends ClientConfig {
+export interface PgContractCacheOptions extends ClientConfig {
   minEntriesPerContract: number;
   maxEntriesPerContract: number;
 }
diff --git a/src/PgSortKeyCache.ts b/src/PgSortKeyCache.ts
new file mode 100644
index 0000000..5d21fa7
--- /dev/null
+++ b/src/PgSortKeyCache.ts
@@ -0,0 +1,348 @@
+import {
+  BatchDBOp,
+  CacheKey,
+  LoggerFactory,
+  PruneStats,
+  SortKeyCache,
+  SortKeyCacheResult,
+} from "warp-contracts";
+import { Pool, PoolClient } from "pg";
+import { SortKeyCacheRangeOptions } from "warp-contracts/lib/types/cache/SortKeyCacheRangeOptions";
+import { PgSortKeyCacheOptions } from "./PgSortKeyCacheOptions";
+
+export class PgSortKeyCache<V> implements SortKeyCache<V> {
+  private readonly logger = LoggerFactory.INST.create(PgSortKeyCache.name);
+
+  private readonly tableName: string;
+  private pool: Pool;
+  private client: PoolClient;
+
+  constructor(private readonly pgCacheOptions: PgSortKeyCacheOptions) {
+    if (!pgCacheOptions.tableName) {
+      throw new Error("Table name cannot be empty");
+    }
+    this.tableName = pgCacheOptions.tableName;
+  }
+
+  private async createTableIfNotExists() {
+    await this.connection().query(
+      "CREATE schema if not exists warp; SET search_path TO 'warp';"
+    );
+    this.logger.info(`Attempting to create table ${this.tableName}`);
+    const query = `
+        CREATE TABLE IF NOT EXISTS "${this.tableName}"
+        (
+            id bigserial,
+            key TEXT NOT NULL,
+            sort_key TEXT NOT NULL,
+            value JSONB,
+            PRIMARY KEY (key, sort_key)
+        );
+        CREATE INDEX IF NOT EXISTS "idx_${this.tableName}_key_sk" ON "${this.tableName}" (key, sort_key DESC);
+        CREATE INDEX IF NOT EXISTS "idx_${this.tableName}_key" ON "${this.tableName}" (key);`;
+    await this.connection().query(query);
+  }
+
+  async begin(): Promise<void> {
+    this.logger.debug(`Begin transaction`);
+    if (this.client == null) {
+      this.client = await this.pool.connect();
+    }
+    await this.client.query("BEGIN;");
+  }
+
+  async close(): Promise<void> {
+    if (this.client) {
+      this.client.release();
+      this.client = null;
+    }
+    await this.pool.end();
+    this.pool = null;
+    this.logger.info(`Connection closed`);
+    return;
+  }
+
+  async commit(): Promise<void> {
+    this.logger.debug(`Commit transaction`);
+    if (this.client == null) {
+      this.logger.error(`Called commit when no connection established.`);
+      return;
+    }
+    await this.client.query("COMMIT;");
+  }
+
+  async delete(key: string): Promise<void> {
+    await this.connection().query(
+      `DELETE FROM warp."${this.tableName}" WHERE key = $1;`,
+      [key]
+    );
+  }
+
+  dump(): Promise<any> {
+    return Promise.resolve(undefined);
+  }
+
+  async get(cacheKey: CacheKey): Promise<SortKeyCacheResult<V> | null> {
+    const result = await this.connection().query(
+      `SELECT value
+       FROM warp."${this.tableName}"
+       WHERE key = $1
+         AND sort_key = $2;`,
+      [cacheKey.key, cacheKey.sortKey]
+    );
+
+    if (result && result.rows.length > 0) {
+      return new SortKeyCacheResult(cacheKey.sortKey, result.rows[0].value);
+    }
+    return null;
+  }
+
+  async getLast(key: string): Promise<SortKeyCacheResult<V> | null> {
+    const result = await this.connection().query(
+      `SELECT sort_key, value FROM warp."${this.tableName}" WHERE key = $1 ORDER BY sort_key DESC LIMIT 1;`,
+      [key]
+    );
+
+    if (result && result.rows && result.rows.length > 0) {
+      return new SortKeyCacheResult(
+        result.rows[0].sort_key,
+        result.rows[0].value
+      );
+    }
+    return null;
+  }
+
+  async getLastSortKey(): Promise<string | null> {
+    const result = await this.connection().query(
+      `SELECT max(sort_key) as sort_key FROM warp."${this.tableName}";`
+    );
+    return result.rows[0].sort_key == "" ? null : result.rows[0].sort_key;
+  }
+
+  async getLessOrEqual(
+    key: string,
+    sortKey: string
+  ): Promise<SortKeyCacheResult<V> | null> {
+    const result = await this.connection().query(
+      `SELECT sort_key, value FROM warp."${this.tableName}" WHERE key = $1 AND sort_key <= $2 ORDER BY sort_key DESC LIMIT 1;`,
+      [key, sortKey]
+    );
+
+    if (result && result.rows.length > 0) {
+      return new SortKeyCacheResult(
+        result.rows[0].sort_key,
+        result.rows[0].value
+      );
+    }
+    return null;
+  }
+
+  async open(): Promise<void> {
+    const conf = this.pgCacheOptions;
+    this.pool = new Pool(conf).on('error', (err) => {
+      this.logger.error('Unexpected error on idle client', err);
+    });
+
+    this.logger.info(`Connecting pg... ${conf.user}@${conf.host}:${conf.port}/${conf.database}`);
+    await this.pool.query("CREATE schema if not exists warp; SET search_path TO 'warp';");
+    await this.createTableIfNotExists();
+    this.logger.info(`Setup finished`);
+  }
+
+  private connection(): Pool | PoolClient {
+    if (this.client) {
+      return this.client;
+    }
+    return this.pool;
+  }
+
+  /**
+   Let's assume that given contract cache contains these sortKeys: [a, b, c, d, e, f]
+   Let's assume entriesStored = 2
+   After pruning, the cache should be left with these keys: [e,f].
+
+   const entries = await contractCache.keys({ reverse: true, limit: entriesStored }).all();
+   This would return in this case entries [f, e] (notice the "reverse: true").
+
+   await contractCache.clear({ lt: entries[entries.length - 1] });
+   This effectively means: await contractCache.clear({ lt: e });
+   -> hence the entries [a,b,c,d] are removed and left are the [e,f]
+   */
+  async prune(entriesStored = 5): Promise<PruneStats> {
+    if (!entriesStored || entriesStored <= 0) {
+      entriesStored = 1;
+    }
+
+    const allItems = +(
+      await this.client.query(
+        `SELECT count(1) AS total
+         FROM warp."${this.tableName}"`
+      )
+    ).rows[0].total;
+
+    const deleted = +(
+      await this.client.query(
+        `
+        WITH sorted_cache AS
+                 (SELECT id, key, sort_key, row_number() over (PARTITION BY "key" ORDER BY sort_key DESC) AS rw
+                  FROM warp."${this.tableName}"),
+             deleted AS
+                 (DELETE
+                  FROM warp."${this.tableName}"
+                  WHERE id IN (SELECT id FROM sorted_cache WHERE rw > $1) RETURNING *)
+        SELECT count(1) AS del_total
+        FROM deleted;
+        `,
+        [entriesStored]
+      )
+    ).rows[0].del_total;
+
+    return {
+      entriesBefore: allItems,
+      entriesAfter: allItems - deleted,
+      sizeBefore: -1,
+      sizeAfter: -1,
+    };
+  }
+
+  async put(stateCacheKey: CacheKey, value: V): Promise<void> {
+    const stringifiedValue = JSON.stringify(value);
+    await this.removeOldestEntries(stateCacheKey.key);
+
+    await this.connection().query(
+      `
+      INSERT INTO warp."${this.tableName}" (key, sort_key, value)
+      VALUES ($1, $2, $3)
+      ON CONFLICT(key, sort_key) DO UPDATE SET value = EXCLUDED.value`,
+      [stateCacheKey.key, stateCacheKey.sortKey, stringifiedValue]
+    );
+  }
+
+  private async removeOldestEntries(key: string) {
+    const rs = await this.connection().query(
+      `
+      SELECT count(1) as total
+      FROM warp."${this.tableName}"
+      WHERE key = $1
+      GROUP BY key
+      `,
+      [key]
+    );
+    if (rs.rows.length > 0) {
+      const entriesTotal = rs.rows[0].total;
+      if (entriesTotal >= this.pgCacheOptions.maxEntriesPerKey) {
+        await this.connection().query(
+          `
+          WITH sorted_cache AS
+                   (SELECT id, row_number() over (ORDER BY sort_key DESC) AS rw
+                    FROM warp."${this.tableName}"
+                    WHERE key = $1)
+          DELETE
+          FROM warp."${this.tableName}"
+          WHERE id IN (SELECT id FROM sorted_cache WHERE rw >= $2);
+          `,
+          [key, this.pgCacheOptions.minEntriesPerKey]
+        );
+      }
+    }
+  }
+
+  async rollback(): Promise<void> {
+    this.logger.debug(`Rollback`);
+    if (this.client == null) {
+      this.logger.error(`Rollback called, but no connection established`);
+      return;
+    }
+    await this.client.query("ROLLBACK;");
+  }
+
+  storage<S>(): S {
+    return this.client as S;
+  }
+
+  async drop(): Promise<void> {
+    await this.client.query(
+      `
+      DROP INDEX IF EXISTS "idx_${this.tableName}_key_sk";
+      DROP INDEX IF EXISTS "idx_${this.tableName}_key";
+      DROP INDEX IF EXISTS "idx_${this.tableName}_owner";
+      DROP TABLE IF EXISTS warp."${this.tableName}";
+      `
+    );
+  }
+
+  async batch(opStack: BatchDBOp<V>[]): Promise<void> {
+    try {
+      await this.begin();
+      for (const op of opStack) {
+        if (op.type === "put") {
+          await this.put(op.key, op.value);
+        } else if (op.type === "del") {
+          await this.delete(op.key);
+        }
+      }
+      await this.commit();
+    } catch (e) {
+      await this.rollback();
+      throw e;
+    } finally {
+      this.client.release();
+      this.client = null;
+    }
+  }
+
+  async del(cacheKey: CacheKey): Promise<void> {
+    await this.connection().query(
+      `
+      INSERT INTO warp."${this.tableName}" (key, sort_key, value)
+      VALUES ($1, $2, NULL)
+      ON CONFLICT(key, sort_key) DO UPDATE SET value = EXCLUDED.value`,
+      [cacheKey.key, cacheKey.sortKey]
+    );
+    return Promise.resolve(undefined);
+  }
+
"DESC" : "ASC"; + const result = await this.connection().query({ + text: `WITH latest_values AS (SELECT DISTINCT ON (key) key, sort_key, value + FROM warp."${this.tableName}" + WHERE sort_key <= $1 + AND value IS NOT NULL + AND ($2::text IS NULL OR key >= $2) + AND ($3::text IS NULL OR key < $3) + order by key ${order}, sort_key desc + LIMIT $4::bigint) + select key, value + from latest_values + order by key ${order};`, + values: [sortKey, options?.gte, options?.lt, options?.limit], + rowMode: "array", + }); + return result.rows.flat(); + } + + async kvMap( + sortKey: string, + options?: SortKeyCacheRangeOptions + ): Promise> { + const order = options?.reverse ? "DESC" : "ASC"; + const result = await this.connection().query( + ` + WITH latest_values AS (SELECT DISTINCT ON (key) key, sort_key, value + FROM warp."${this.tableName}" + WHERE sort_key <= $1 + AND value IS NOT NULL + AND ($2::text IS NULL OR key >= $2) + AND ($3::text IS NULL OR key < $3) + order by key ${order}, sort_key desc + LIMIT $4::bigint) + select key, value + from latest_values + order by key ${order};`, + [sortKey, options?.gte, options?.lt, options?.limit] + ); + return new Map(result.rows.map((i): [string, V] => [i.key, i.value])); + } +} diff --git a/src/PgSortKeyCacheOptions.ts b/src/PgSortKeyCacheOptions.ts new file mode 100644 index 0000000..3ae0c64 --- /dev/null +++ b/src/PgSortKeyCacheOptions.ts @@ -0,0 +1,7 @@ +import { ClientConfig } from "pg"; + +export interface PgSortKeyCacheOptions extends ClientConfig { + tableName: string; + minEntriesPerKey: number; + maxEntriesPerKey: number; +} diff --git a/src/__tests__/sqlite-cache-prune.test.ts b/src/__tests__/pg-cache-prune.test.ts similarity index 87% rename from src/__tests__/sqlite-cache-prune.test.ts rename to src/__tests__/pg-cache-prune.test.ts index 10a3279..aac2ace 100644 --- a/src/__tests__/sqlite-cache-prune.test.ts +++ b/src/__tests__/pg-cache-prune.test.ts @@ -1,11 +1,11 @@ -import { cache, getContractId, getSortKey } from "./utils"; +import { contractCache, getContractId, getSortKey } from "./utils"; import { CacheKey } from "warp-contracts"; describe("Postgres cache prune", () => { it("handle improper args", async () => { const contracts = 10; const entriesPerContract = 1; - const sut = await cache(contracts, entriesPerContract); + const sut = await contractCache(contracts, entriesPerContract); const noopStats = { entriesAfter: contracts, entriesBefore: contracts }; expect(await sut.prune(0)).toMatchObject(noopStats); @@ -17,7 +17,7 @@ describe("Postgres cache prune", () => { it("no deletion should be performed", async () => { const contracts = 10; const entriesPerContract = 1; - const sut = await cache(contracts, entriesPerContract); + const sut = await contractCache(contracts, entriesPerContract); const noopStats = { entriesAfter: contracts, entriesBefore: contracts }; expect(await sut.prune(1)).toMatchObject(noopStats); @@ -33,7 +33,7 @@ describe("Postgres cache prune", () => { it("should remove all unneeded entries, one contract", async () => { const contracts = 1; const entriesPerContract = 10; - const sut = await cache(contracts, entriesPerContract); + const sut = await contractCache(contracts, entriesPerContract); expect(await sut.prune(1)).toMatchObject({ entriesBefore: contracts * entriesPerContract, entriesAfter: contracts * 1, @@ -45,7 +45,7 @@ describe("Postgres cache prune", () => { it("should remove all unneeded entries, in many contracts", async () => { const contracts = 200; const entriesPerContract = 10; - const sut = await 
-    const sut = await cache(contracts, entriesPerContract);
+    const sut = await contractCache(contracts, entriesPerContract);
     expect(await sut.prune(2)).toMatchObject({
       entriesBefore: contracts * entriesPerContract,
       entriesAfter: contracts * 2,
@@ -58,7 +58,7 @@ describe("Postgres cache prune", () => {
     const contracts = 100;
     const entriesPerContract = 20;
     const toLeave = 3;
-    const sut = await cache(contracts, entriesPerContract);
+    const sut = await contractCache(contracts, entriesPerContract);
     await sut.prune(toLeave);
 
     for (let i = 0; i < contracts; i++) {
@@ -93,7 +93,7 @@ describe("Postgres cache prune", () => {
   it("deletes first contract from cache", async () => {
     const contracts = 7;
     const entriesPerContract = 12;
-    const sut = await cache(contracts, entriesPerContract);
+    const sut = await contractCache(contracts, entriesPerContract);
 
     await sut.delete(getContractId(0));
 
@@ -120,7 +120,7 @@ describe("Postgres cache prune", () => {
     const contracts = 7;
     const entriesPerContract = 12;
     const removedContractIdx = 3;
-    const sut = await cache(contracts, entriesPerContract);
+    const sut = await contractCache(contracts, entriesPerContract);
 
     await sut.delete(getContractId(removedContractIdx));
 
diff --git a/src/__tests__/sqlite-cache.test.ts b/src/__tests__/pg-cache.test.ts
similarity index 97%
rename from src/__tests__/sqlite-cache.test.ts
rename to src/__tests__/pg-cache.test.ts
index 86e8e52..03fa4be 100644
--- a/src/__tests__/sqlite-cache.test.ts
+++ b/src/__tests__/pg-cache.test.ts
@@ -1,9 +1,9 @@
-import { cache, getContractId, getSortKey, evalState } from "./utils";
+import { contractCache, getContractId, getSortKey, evalState } from "./utils";
 import { CacheKey } from "warp-contracts";
 
 describe("Postgres cache", () => {
   it("should return proper data", async () => {
-    const sut = await cache(0, 100);
+    const sut = await contractCache(0, 100);
 
     await sut.put(
       {
@@ -194,7 +194,7 @@ describe("Postgres cache", () => {
   it("respects limits for max interactions per contract", async () => {
     const max = 10;
 
-    const sut = await cache(0, 0, max);
+    const sut = await contractCache(0, 0, max);
 
     for (let j = 0; j < max; j++) {
       await sut.put(
@@ -261,7 +261,7 @@ describe("Postgres cache", () => {
   });
 
   it("should keep the latest insert, even it is the smallest one", async () => {
-    const sut = await cache(0, 0, 2);
+    const sut = await contractCache(0, 0, 2);
 
     await sut.put(
       {
diff --git a/src/__tests__/pg-sort-key-cache-commit-rollback.test.ts b/src/__tests__/pg-sort-key-cache-commit-rollback.test.ts
new file mode 100644
index 0000000..c0485f7
--- /dev/null
+++ b/src/__tests__/pg-sort-key-cache-commit-rollback.test.ts
@@ -0,0 +1,166 @@
+import { CacheKey } from "warp-contracts";
+import { getSortKey, sortKeyCache } from "./utils";
+import { PgSortKeyCache } from "../PgSortKeyCache";
+
+let sut: PgSortKeyCache<number>;
+
+beforeAll(async () => {
+  sut = await sortKeyCache(100);
+});
+
+describe('Postgres sort key cache transactions testing', () => {
+  it('access range keys during active transaction and commit', async () => {
+    await sut.open();
+    const sortKey = 343;
+
+    await sut.begin();
+    await sut.put(new CacheKey("key.one", getSortKey(sortKey)), 1);
+    await sut.put(new CacheKey("key.two", getSortKey(sortKey)), 2);
+
+    const transactionKeys = await sut.keys(getSortKey(sortKey));
+    expect(transactionKeys).toContain("key.one");
+    expect(transactionKeys).toContain("key.two");
+
+    const kvKeys = Array.from(
+      (
+        await sut.kvMap(getSortKey(sortKey), { gte: "key.", lt: "key.\xff" })
+      ).keys()
+    );
+    expect(kvKeys).toContain("key.one");
expect(kvKeys).toContain("key.two"); + + await sut.commit(); + + expect((await sut.getLast("key.one")).cachedValue).toEqual(1); + expect((await sut.getLast("key.three"))?.cachedValue).toBeFalsy(); + + await sut.close(); + }); + + it("keys order natural and reversed", async () => { + await sut.open(); + const sortKey = 348; + + await sut.begin(); + + await sut.put(new CacheKey("user.11", getSortKey(sortKey)), 2); + await sut.put(new CacheKey("user.12", getSortKey(sortKey)), 2); + await sut.put(new CacheKey("user.13", getSortKey(sortKey)), 2); + await sut.put(new CacheKey("user.14", getSortKey(sortKey)), 2); + await sut.put(new CacheKey("user.15", getSortKey(sortKey)), 2); + + const naturalOrder = Array.from( + ( + await sut.kvMap(getSortKey(sortKey), { gte: "user.11", lt: "user.14" }) + ).keys() + ); + const reverseOrder = Array.from( + ( + await sut.kvMap(getSortKey(sortKey), { + gte: "user.11", + lt: "user.14", + reverse: true, + }) + ).keys() + ); + expect(naturalOrder.reverse()).toEqual(reverseOrder); + + await sut.commit(); + + await sut.begin(); + await sut.del(new CacheKey("user.12", getSortKey(sortKey))); + + const items = Array.from( + ( + await sut.kvMap(getSortKey(sortKey), { + gte: "user.11", + lt: "user.14", + reverse: true, + }) + ).keys() + ); + expect(items).toEqual(["user.13", "user.11"]); + + await sut.commit(); + await sut.close(); + }); + + it("access range keys during active transaction and rollback", async () => { + await sut.open(); + const sortKey = 384; + + await sut.begin(); + await sut.put(new CacheKey("key.one", getSortKey(sortKey)), 11); + await sut.put(new CacheKey("key.three", getSortKey(sortKey)), 3); + await sut.del(new CacheKey("key.two", getSortKey(sortKey))); + + const transactionKeys = await sut.keys(getSortKey(sortKey)); + expect(transactionKeys).toContain("key.one"); + expect(transactionKeys).toContain("key.three"); + + const kvKeys = Array.from( + ( + await sut.kvMap(getSortKey(sortKey), { gte: "key.", lt: "key.\xff" }) + ).keys() + ); + expect(kvKeys).toContain("key.one"); + expect(kvKeys).toContain("key.three"); + + expect((await sut.getLast("key.one")).cachedValue).toEqual(11); + expect((await sut.getLast("key.two"))?.cachedValue).toBeFalsy(); + expect((await sut.getLast("key.three")).cachedValue).toEqual(3); + + await sut.rollback(); + + expect((await sut.getLast("key.one")).cachedValue).toEqual(1); + expect((await sut.getLast("key.two")).cachedValue).toEqual(2); + expect((await sut.getLast("key.three"))?.cachedValue).toBeFalsy(); + + await sut.close(); + }); + + it("multiple operations", async () => { + await sut.open(); + const sortKey = 395; + + await sut.begin(); + await sut.put(new CacheKey("key.one", getSortKey(sortKey)), 111); + await sut.put(new CacheKey("key.two", getSortKey(sortKey)), 222); + await sut.put(new CacheKey("key.four", getSortKey(sortKey)), 333); + await sut.put(new CacheKey("key.five", getSortKey(sortKey)), 333); + + await sut.del(new CacheKey("key.two", getSortKey(sortKey))); + await sut.del(new CacheKey("key.fa", getSortKey(sortKey))); + + const transactionKeys = await sut.keys(getSortKey(sortKey)); + expect(transactionKeys).toContain("key.one"); + expect(transactionKeys).toContain("key.four"); + + const kvKeys = Array.from( + ( + await sut.kvMap(getSortKey(sortKey), { + gte: "key.", + lt: "key.\xff", + limit: 2, + }) + ).keys() + ); + expect(kvKeys).toEqual(["key.five", "key.four"]); + + await sut.rollback(); + + const rollbackKeys = Array.from( + ( + await sut.kvMap(getSortKey(sortKey), { + gte: "key.", + lt: "key.\xff", 
+          reverse: true,
+        })
+      ).keys()
+    );
+    expect(rollbackKeys).toEqual(["key.two", "key.one"]);
+
+    await sut.drop();
+    await sut.close();
+  });
+});
diff --git a/src/__tests__/utils.ts b/src/__tests__/utils.ts
index 2f26d69..a812a6b 100644
--- a/src/__tests__/utils.ts
+++ b/src/__tests__/utils.ts
@@ -1,6 +1,8 @@
 import { PgContractCache } from "../PgContractCache";
-import { PgCacheOptions } from "../PgCacheOptions";
-import { defaultCacheOptions, EvalStateResult } from "warp-contracts";
+import { PgContractCacheOptions } from "../PgContractCacheOptions";
+import { EvalStateResult } from "warp-contracts";
+import { PgSortKeyCacheOptions } from "../PgSortKeyCacheOptions";
+import { PgSortKeyCache } from "../PgSortKeyCache";
 
 export const getContractId = (i: number) => `contract${i}`.padStart(43, "0");
 
@@ -12,20 +14,22 @@ export const getSortKey = (j: number) =>
     "0"
   )},1643210931796,81e1bea09d3262ee36ce8cfdbbb2ce3feb18a717c3020c47d206cb8ecb43b767`;
 
-export const cache = async function (
+export const contractCache = async function (
   numContracts: number,
   numRepeatingEntries: number,
   maxEntries?: number
 ): Promise<PgContractCache<any>> {
-  const pgOptions: PgCacheOptions = {
+  const pgOptions: PgContractCacheOptions = {
     minEntriesPerContract: maxEntries || 100 * numRepeatingEntries,
     maxEntriesPerContract: maxEntries || 100 * numRepeatingEntries,
-    user: "postgres",
-    database: "postgres",
-    port: 5432,
+    user: 'postgres',
+    password: 'postgres',
+    host: 'localhost',
+    database: 'postgres',
+    port: 5432
   };
 
-  const sut = new PgContractCache(defaultCacheOptions, pgOptions);
+  const sut = new PgContractCache(pgOptions);
   await sut.open();
 
   for (let i = 0; i < numContracts; i++) {
@@ -43,6 +47,24 @@ export const cache = async function (
   return sut;
 };
 
+export const sortKeyCache = async function (
+  numRepeatingEntries: number,
+  maxEntries?: number
+): Promise<PgSortKeyCache<any>> {
+  const pgOptions: PgSortKeyCacheOptions = {
+    tableName: "kiwi",
+    minEntriesPerKey: maxEntries || 100 * numRepeatingEntries,
+    maxEntriesPerKey: maxEntries || 100 * numRepeatingEntries,
+    user: 'postgres',
+    password: 'postgres',
+    host: 'localhost',
+    database: 'postgres',
+    port: 5432
+  };
+
+  return new PgSortKeyCache(pgOptions);
+};
+
 export const evalState = function (value: any) {
   return new EvalStateResult(value, {}, {});
 };
diff --git a/src/index.ts b/src/index.ts
index 8d39749..977fcd9 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,2 +1,5 @@
-export * from "./PgCacheOptions";
+export * from "./PgContractCacheOptions";
 export * from "./PgContractCache";
+
+export * from "./PgSortKeyCacheOptions";
+export * from "./PgSortKeyCache";
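
Usage sketch (not part of the patch). The snippet below shows how the renamed PgContractCacheOptions and the new PgSortKeyCache are intended to be wired together after this change; the package entry point, connection settings and table name are assumptions that mirror the test setup in src/__tests__/utils.ts, not documented defaults.

// Illustrative only: package name, credentials and table name are assumptions.
import { CacheKey } from "warp-contracts";
import {
  PgContractCache,
  PgContractCacheOptions,
  PgSortKeyCache,
  PgSortKeyCacheOptions,
} from "warp-contracts-postgres"; // assumed package entry (re-exported from src/index.ts)

const clientConfig = {
  user: "postgres",
  password: "postgres",
  host: "localhost",
  database: "postgres",
  port: 5432,
};

// The contract cache constructor no longer takes warp's CacheOptions as a first argument.
const contractCacheOptions: PgContractCacheOptions = {
  minEntriesPerContract: 10,
  maxEntriesPerContract: 100,
  ...clientConfig,
};
const contractCache = new PgContractCache<any>(contractCacheOptions);

// Each PgSortKeyCache instance owns a single table inside the "warp" schema.
const kvOptions: PgSortKeyCacheOptions = {
  tableName: "kv",
  minEntriesPerKey: 10,
  maxEntriesPerKey: 100,
  ...clientConfig,
};
const kv = new PgSortKeyCache<number>(kvOptions);

async function example(): Promise<void> {
  await contractCache.open();
  await kv.open(); // creates schema, table and indexes if they do not exist

  // example sort key in warp's "blockHeight,timestamp,hash" format
  const sortKey =
    "000000000860,1643210931796,81e1bea09d3262ee36ce8cfdbbb2ce3feb18a717c3020c47d206cb8ecb43b767";

  await kv.begin(); // the writes below run in one transaction
  await kv.put(new CacheKey("balance.alice", sortKey), 42);
  await kv.commit(); // or kv.rollback() to discard them

  const last = await kv.getLast("balance.alice");
  console.log(last?.sortKey, last?.cachedValue);

  await kv.close();
  await contractCache.close();
}

example().catch(console.error);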
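
Continuing the sketch above, the range-read and prune semantics described in the PgSortKeyCache doc comment (kvMap/keys return the latest value per key at or below a given sort key; prune keeps only the newest N entries per key) could be exercised like this. Key names, values and the sort-key helper are made up for illustration.

// Range reads and pruning (continues the sketch above; data is hypothetical).
async function rangeAndPrune(kv: PgSortKeyCache<number>): Promise<void> {
  const sk = (n: number) =>
    `${String(n).padStart(12, "0")},1643210931796,81e1bea09d3262ee36ce8cfdbbb2ce3feb18a717c3020c47d206cb8ecb43b767`;

  await kv.put(new CacheKey("user.1", sk(1)), 10);
  await kv.put(new CacheKey("user.1", sk(2)), 11); // newer entry for the same key
  await kv.put(new CacheKey("user.2", sk(2)), 20);

  // Latest value per key at or below sort key sk(2), limited to keys starting with "user.".
  const snapshot = await kv.kvMap(sk(2), { gte: "user.", lt: "user.\xff" });
  console.log(snapshot); // expected: Map { "user.1" => 11, "user.2" => 20 }

  // prune() runs on the transaction client, so wrap it in begin()/commit().
  await kv.begin();
  const stats = await kv.prune(1); // keep only the newest entry per key
  await kv.commit();
  console.log(`pruned from ${stats.entriesBefore} to ${stats.entriesAfter} rows`);
}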