Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

v6 #30

Draft
wants to merge 5 commits into
base: main
Choose a base branch
from
Draft

v6 #30

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
node-version: ['16', '18']
node-version: ['20']
redis-version: ['6']

steps:
Expand Down
11 changes: 3 additions & 8 deletions index.js
Original file line number Diff line number Diff line change
Expand Up @@ -94,19 +94,14 @@ const createCachedHafasClient = (hafas, storage, opt = {}) => {

// rather static data
reachableFrom: (_, opt = {}) => dynamicCachePeriod(5, 12, 60, opt.when),
locations: HOUR,
stop: HOUR,
nearby: HOUR,
locations: () => HOUR,
stop: () => HOUR,
nearby: () => HOUR,

...(opt.cachePeriods || {}),
}
for (const [key, val] of Object.entries(cachePeriods)) {
// todo [breaking]: always expect a function
if ('function' === typeof val) continue
if ('number' === typeof val) {
cachePeriods[key] = () => val
continue
}
throw new TypeError(`opt.cachePeriods.${key} must be a number or a function returning a number`)
}

Expand Down
4 changes: 2 additions & 2 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
"bugs": "https://github.com/public-transport/cached-hafas-client/issues",
"license": "ISC",
"engines": {
"node": ">=16"
"node": ">=20"
},
"dependencies": {
"common-prefix": "^1.1.0",
Expand All @@ -46,7 +46,7 @@
"vbb-hafas": "^8.0.0"
},
"cached-hafas-client": {
"dataVersion": 4
"dataVersion": 5
},
"peerDependencies": {
"hafas-client": ">=6.0 <=6.0",
Expand Down
20 changes: 10 additions & 10 deletions readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ const MINUTE = 60 * SECOND

const cachePeriods = {
// cache all cachedHafas.stop(…) calls for 10m
stop: 10 * MINUTE,
stop: () => 10 * MINUTE,
// cache cachedHafas.trip(tripId, opt) based on sqrt(opt.when - now)
trip: (_, opt = {}) => {
const diffSecs = (new Date(opt.when) - Date.now()) / SECOND
Expand Down Expand Up @@ -133,15 +133,15 @@ createCachedHafas(hafas, storage, opt = {})
```js
{
cachePeriods: {
departures: 30_1000, arrivals: 30_1000, // 30s
journeys: 30_1000, // 30s
refreshJourney: 60_1000, // 1m
trip: 30_1000, // 30s
radar: 10_1000, // 10s
locations: 3_600_1000, // 1h
stop: 3_600_1000, // 1h
nearby: 3_600_1000, // 1h
reachableFrom: 30_1000,
departures: () => 30_000, arrivals: () => 30_000, // 30s
journeys: () => 30_000, // 30s
refreshJourney: () => 60_000, // 1m
trip: () => 30_000, // 30s
radar: () => 10_000, // 10s
locations: () => 3_600_000, // 1h
stop: () => 3_600_000, // 1h
nearby: () => 3_600_000, // 1h
reachableFrom: () => 30_000,
},
}
```
Expand Down
49 changes: 38 additions & 11 deletions stores/redis.js
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ local function read_collection (id)
cursor = res[1];

for _, key in ipairs(res[2]) do
local __, ___, when, i = string.find(key, "[^:]+:[^:]+:[^:]+:([^:]+):([^:]+)");
-- version:collections_rows:{method:inputHash}:collection_id:when:i
local __, ___, when, i = string.find(key, "[^:]+:[^:]+:[^:]+:[^:]+:[^:]+:([^:]+):([^:]+)");
when = tonumber(when);
i = tonumber(i);

Expand Down Expand Up @@ -155,6 +156,20 @@ const createRedisStore = (db) => {
debug('init')
}

// collections
// version:collections:{method:inputHash}:created -> collectionId:when:duration
// version:collections_rows:{method:inputHash}:collection_id:when:i

// We're using a Redis Cluster hash tag `{method:inputHash}` here. Both the
// collection entry and the row entries must be read atomically, so we need
// to make sure they have the same hash tag!
const _collectionPrefix = (method, inputHash) => {
return `${VERSION}:${COLLECTIONS}:{${method}:${inputHash}}`
}
const _collectionRowsPrefix = (method, inputHash) => {
return `${VERSION}:${COLLECTIONS_ROWS}:{${method}:${inputHash}}`
}

const readCollection = async (args) => {
debug('readCollection', args)
const {
Expand All @@ -164,11 +179,14 @@ const createRedisStore = (db) => {
const createdMin = Math.floor(args.createdMin / 1000)
const createdMax = Math.ceil(args.createdMax / 1000)

const colPrefix = _collectionPrefix(method, inputHash) + ':'
// The common prefix is usually longer than just colPrefix because createdMin
// & createdMax usually share some digits.
const prefix = commonPrefix([
[VERSION, COLLECTIONS, method, inputHash, createdMin].join(':'),
[VERSION, COLLECTIONS, method, inputHash, createdMax].join(':')
`${colPrefix}${createdMin}`,
`${colPrefix}${createdMax}`,
])
const rowsPrefix = `${VERSION}:${COLLECTIONS_ROWS}:`
const rowsPrefix = _collectionRowsPrefix(method, inputHash) + ':'
const rows = await db[_readMatchingCollection](
prefix, rowsPrefix,
createdMin, createdMax,
Expand All @@ -194,29 +212,35 @@ const createRedisStore = (db) => {

const collectionId = randomBytes(10).toString('hex')


const rowsPrefix = _collectionRowsPrefix(method, inputHash)
const cmds = [
[
'set',
[VERSION, COLLECTIONS, method, inputHash, created].join(':'),
[_collectionPrefix(method, inputHash), created].join(':'),
[collectionId, when, duration].join(':'),
'PX', cachePeriod,
],
...rows.map((row, i) => {
// todo: fall back to plannedWhen?
const t = +new Date(row.when)
if (Number.isNaN(t)) throw new Error(`rows[${i}].when must be a number or an ISO 8601 string`)
const key = [VERSION, COLLECTIONS_ROWS, collectionId, t, i].join(':')
// todo: add method & inputHash to match hash tag of `COLLECTIONS:`?
const key = [rowsPrefix, collectionId, t, i].join(':')
return ['set', key, row.data, 'PX', cachePeriod]
}),
]
await db.multi(cmds).exec()
}

// atomics
// method:inputHash:created:id
// version:atoms:{method:inputHash}:created:id
// todo: this fails with `created` timestamps of different lengths (2033)

// We're using a Redis Cluster hash tag `{method:inputHash}` here.
const _atomPrefix = (method, inputHash) => {
return `${VERSION}:${ATOMS}:{${method}:${inputHash}}`
}

const readAtom = async (args) => {
debug('readAtom', args)
const {
Expand All @@ -226,9 +250,12 @@ const createRedisStore = (db) => {
const createdMax = Math.ceil(args.createdMax / 1000)
const deserialize = args.deserialize || JSON.parse

const pref = _atomPrefix(method, inputHash) + ':'
// The common prefix is usually longer than just pref because createdMin
// & createdMax usually share some digits.
const keysPrefix = commonPrefix([
[VERSION, ATOMS, method, inputHash, createdMin].join(':'),
[VERSION, ATOMS, method, inputHash, createdMax].join(':')
`${pref}${createdMin}`,
`${pref}${createdMax}`
])
const val = await db[_readMatchingAtom]([
keysPrefix,
Expand All @@ -247,7 +274,7 @@ const createRedisStore = (db) => {
const created = Math.round(args.created / 1000)
const serialize = args.serialize || JSON.stringify

const key = [VERSION, ATOMS, method, inputHash, created].join(':')
const key = [_atomPrefix(method, inputHash), created].join(':')
await db.set(key, serialize(val), 'PX', cachePeriod)
}

Expand Down
10 changes: 5 additions & 5 deletions test/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -62,11 +62,11 @@ const runTests = (storeName, createDb, createStore) => {
const store = createStore(db)
const cachedMocked = createCachedHafas(mocked, store, {
cachePeriods: {
departures: ttl, arrivals: ttl,
journeys: ttl, refreshJourney: ttl, trip: ttl,
radar: ttl,
locations: ttl, stop: ttl, nearby: ttl,
reachableFrom: ttl,
departures: () => ttl, arrivals: () => ttl,
journeys: () => ttl, refreshJourney: () => ttl, trip: () => ttl,
radar: () => ttl,
locations: () => ttl, stop: () => ttl, nearby: () => ttl,
reachableFrom: () => ttl,
...cachePeriods,
}
})
Expand Down
10 changes: 5 additions & 5 deletions test/with-metrics.js
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,11 @@ test('trackCachingMetrics works', async (t) => {
const store = createInMemoryStore()
const cachedMocked = createCachedHafas(mocked, store, {
cachePeriods: {
departures: ttl, arrivals: ttl,
journeys: ttl, refreshJourney: ttl, trip: ttl,
radar: ttl,
locations: ttl, stop: ttl, nearby: ttl,
reachableFrom: ttl,
departures: () => ttl, arrivals: () => ttl,
journeys: () => ttl, refreshJourney: () => ttl, trip: () => ttl,
radar: () => ttl,
locations: () => ttl, stop: () => ttl, nearby: () => ttl,
reachableFrom: () => ttl,
},
})

Expand Down