Here's how I implemented a token bucket with memcache on GAE:
Edit: taking a (different) approach.
This is partly borrowed from https://github.com/simonw/ratelimitcache/blob/master/ratelimitcache.py
import logging
import time

from google.appengine.api import memcache


def throttle(key, rate_count, rate_seconds, tries=3):
    '''
    Returns True if throttled (not enough tokens available), else False.
    Implements the token bucket algorithm.
    '''
    client = memcache.Client(CLIENT_ARGS)  # CLIENT_ARGS defined elsewhere
    for _ in range(tries):
        now = int(time.time())
        # One counter key per second of the rate window, newest first.
        keys = ['%s-%s' % (key, str(now - i)) for i in range(rate_seconds)]
        # Make sure the current second's counter exists.
        client.add(keys[0], 0, time=rate_seconds + 1)
        # Fetch the older counters in one round trip, and the current one
        # with gets() so it can be updated with compare-and-set below.
        tokens = client.get_multi(keys[1:])
        tokens[keys[0]] = client.gets(keys[0])
        if sum(tokens.values()) >= rate_count:
            return True
        if client.cas(keys[0], tokens[keys[0]] + 1, time=rate_seconds + 1) != 0:
            return False
    logging.error('cache contention error')
    return True
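For context, here is a minimal sketch (not part of the original answer) of how throttle() might be called from a webapp2 handler on GAE, keyed per client IP. The handler class, key prefix, and limits are illustrative assumptions.

import webapp2

class ApiHandler(webapp2.RequestHandler):
    def get(self):
        # Assumed policy for illustration: at most 10 requests
        # per 60-second window per client IP.
        key = 'api-%s' % self.request.remote_addr
        if throttle(key, rate_count=10, rate_seconds=60):
            self.response.set_status(429)
            self.response.write('rate limit exceeded')
            return
        self.response.write('ok')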
Here are some usage examples:
def test_that_it_throttles_too_many_requests(self):
    burst = 1
    interval = 1
    assert shared.rate_limit.throttle('test', burst, interval) is False
    assert shared.rate_limit.throttle('test', burst, interval) is True

def test_that_it_doesnt_throttle_burst_of_requests(self):
    burst = 16
    interval = 1
    for i in range(burst):
        assert shared.rate_limit.throttle('test', burst, interval) is False
    time.sleep(interval + 1)