Merge pull request #48 from auth0/POC-go-every-n
feat: relax take counts
joseluisdiaz authored Oct 24, 2023
2 parents 66eff99 + 05a43d7 commit e851446
Showing 4 changed files with 80 additions and 10 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -52,6 +52,7 @@ Buckets:
- `per_interval` (number): the number of tokens the bucket receives on every interval.
- `interval` (number): defines the interval in milliseconds.
- `unlimited` (boolean = false): unlimited requests (skip take).
- `skip_n_calls` (number): `take` will go to Redis every `n` calls instead of on every call (see the illustrative example below).
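
For illustration, a minimal sketch of a bucket that uses `skip_n_calls` (names and values are hypothetical, assuming the JavaScript bucket-configuration object used by this library):

```js
const buckets = {
  ip: {
    size: 10,          // bucket capacity
    per_interval: 10,  // tokens added back on every interval
    interval: 1000,    // interval in milliseconds
    skip_n_calls: 2    // skip calls to Redis, serving the intermediate takes from a local count
  }
};
```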

Ping:

33 changes: 32 additions & 1 deletion lib/db.js
@@ -2,6 +2,7 @@ const ms = require('ms');
const fs = require('fs');
const _ = require('lodash');
const async = require('async');
const LRU = require('lru-cache');
const utils = require('./utils');
const Redis = require('ioredis');
const { validateParams } = require('./validation');
@@ -51,6 +52,7 @@ class LimitDBRedis extends EventEmitter {
this.configurateBuckets(config.buckets);
this.prefix = config.prefix;
this.globalTTL = (config.globalTTL || ms('7d')) / 1000;
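// bounded LRU holding the last take result and a per-key call counter for skip_n_calls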
this.callCounts = new LRU({ max: 50 });

const redisOptions = {
// a low commandTimeout value would likely cause sharded clusters to fail `enableReadyCheck` due to it running `CLUSTER INFO`
@@ -199,7 +201,7 @@

const key = `${params.type}:${params.key}`;

const count = this._determineCount({
let count = this._determineCount({
paramsCount: params.count,
defaultCount: 1,
bucketKeyConfigSize: bucketKeyConfig.size,
@@ -215,6 +217,31 @@
});
}

if (bucketKeyConfig.skip_n_calls > 0) {
  const prevCall = this.callCounts.get(key);

  if (prevCall) {
    const shouldGoToRedis = prevCall.count >= bucketKeyConfig.skip_n_calls;

    if (!shouldGoToRedis) {
      prevCall.count++;
      return process.nextTick(callback, null, prevCall.res);
    }

    // If prevCall doesn't exist, this is the first time we go to Redis, so count
    // is left unchanged. On subsequent trips the take count should be proportional
    // to the number of calls we skip: if count = 3 and we go to Redis every 5 calls,
    // the take should be 15. This parameter is most likely 1, so the multiplication
    // is usually redundant, but better safe than sorry.
    count *= bucketKeyConfig.skip_n_calls;
  }
}

this.redis.take(key,
bucketKeyConfig.ms_per_interval || 0,
bucketKeyConfig.size,
@@ -238,6 +265,10 @@
delayed: false,
};

if (bucketKeyConfig.skip_n_calls > 0) {
this.callCounts.set(key, { res, count: 0 });
}

return callback(null, res);
});
}
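
As a side note, the bookkeeping above can be summarized in a standalone sketch (not part of this change; `goToRedis` is a placeholder for the real `redis.take` round-trip): the first take for a key always reaches Redis, after that one take out of every `skip_n_calls + 1` goes to Redis with its count multiplied by `skip_n_calls`, mirroring the comment in the code.

```js
// Illustrative sketch of the skip_n_calls flow; not the library's API.
function makeSkippingTake(skipN, goToRedis) {
  let prev = null; // mirrors the { res, count } entry kept in the LRU cache

  return function take(count) {
    if (prev && prev.count < skipN) {
      prev.count++;       // skipped call: reuse the last Redis result
      return prev.res;
    }
    if (prev) {
      count *= skipN;     // charge Redis for the calls we skipped
    }
    const res = goToRedis(count);
    prev = { res, count: 0 };
    return res;
  };
}

// With skipN = 2, calls 1, 4, 7, ... reach Redis; the rest return the cached result.
```
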
1 change: 1 addition & 0 deletions lib/utils.js
@@ -17,6 +17,7 @@ function normalizeTemporals(params) {
'interval',
'size',
'unlimited',
'skip_n_calls'
]);

INTERVAL_SHORTCUTS.forEach(intervalShortcut => {
55 changes: 46 additions & 9 deletions test/db.tests.js
@@ -57,21 +57,17 @@ const buckets = {
size: 1,
per_second: 1
},
cached: {
global: {
size: 3,
per_hour: 2,
enable_cache: true,
overrides: {
faster: {
skipit: {
skip_n_calls: 2,
size: 3,
per_second: 1,
enable_cache: true
},
disabled: {
size: 5,
per_hour: 2,
per_hour: 3
}
}

}
};

@@ -530,6 +526,47 @@
});
});

it('should call redis and not set local cache count', (done) => {
const params = { type: 'global', key: 'aTenant'};
db.take(params, (err) => {
if (err) {
return done(err);
}

assert.equal(db.callCounts.get('global:aTenant'), undefined);
done();
});
});

it('should skip calls', (done) => {
const params = { type: 'global', key: 'skipit'};

async.series([
(cb) => db.take(params, cb), // redis
(cb) => db.take(params, cb), // cache
(cb) => db.take(params, cb), // cache
(cb) => {
assert.equal(db.callCounts.get('global:skipit').count, 2);
cb();
},
(cb) => db.take(params, cb), // redis
(cb) => db.take(params, cb), // cache
(cb) => db.take(params, cb), // cache
(cb) => db.take(params, cb), // redis (first nonconformant)
(cb) => db.take(params, cb), // cache (first cached)
(cb) => {
assert.equal(db.callCounts.get('global:skipit').count, 1);
assert.notOk(db.callCounts.get('global:skipit').res.conformant);
cb();
},
], (err, _results) => {
if (err) {
return done(err);
}

done();
});
});
});

describe('PUT', () => {
