This is already possible with Memcached, but if you are already drinking the Kool-Aid...
import time


class Lock(object):
    def __init__(self, key, expires=60, timeout=10):
        """
        Distributed locking using Redis SETNX and GETSET.

        Usage::

            with Lock('my_lock'):
                print "Critical section"

        :param expires: We consider any existing lock older than
            ``expires`` seconds to be invalid in order to detect
            crashed clients. This value must be longer than the
            critical section takes to execute.
        :param timeout: If another client has already obtained the
            lock, sleep for a maximum of ``timeout`` seconds before
            giving up. A value of 0 means we never wait.
        """
        self.key = key
        self.timeout = timeout
        self.expires = expires

    def __enter__(self):
        timeout = self.timeout
        while timeout >= 0:
            expires = time.time() + self.expires + 1

            if redis.setnx(self.key, expires):
                # We gained the lock; enter critical section
                return

            current_value = redis.get(self.key)

            # We found an expired lock and nobody raced us to replacing it
            if current_value and float(current_value) < time.time() and \
               redis.getset(self.key, expires) == current_value:
                return

            timeout -= 1
            time.sleep(1)

        raise LockTimeout("Timeout whilst waiting for lock")

    def __exit__(self, exc_type, exc_value, traceback):
        redis.delete(self.key)


class LockTimeout(BaseException):
    pass
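The class above refers to a module-level redis object without defining it; it is assumed to be a connected client exposing setnx, get, getset and delete. As a minimal wiring sketch using redis-py (the connection details and lock name below are illustrative, not part of the original code):

from redis import StrictRedis

# Assumption: a local Redis server on the default port; the Lock class
# above expects the client to be bound to the module-level name ``redis``.
redis = StrictRedis(host='localhost', port=6379, db=0)

with Lock('my_lock', expires=60, timeout=10):
    # Only one client at a time reaches this point; others retry for up
    # to ``timeout`` seconds and then raise LockTimeout.
    print "Critical section"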
One common use case for distributed locks in web applications is to prevent clients dog-piling onto an expensive cache key:
def cache_without_dogpiling(key, cb, cache_expiry=None, *args, **kwargs):
    val = cache.get(key)
    if val is not None:
        return val

    # Cache miss; gain the lock to prevent multiple clients calling cb()
    with Lock(key, *args, **kwargs):
        # Check cache again - another client may have set the cache
        val = cache.get(key)
        if val is None:
            val = cb()
            cache.set(key, val, cache_expiry)
        return val


def slow():
    print "Inside slow()"
    return 1 + 1  # Python is slow

>>> cache_without_dogpiling('my_key', slow, 60 * 10)
Inside slow()
2
>>> cache_without_dogpiling('my_key', slow, 60 * 10)
2
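Note that cache here is assumed to expose a Django-style get(key) / set(key, value, timeout) interface; the snippet above doesn't pin it down. If you want to try the function outside a web framework, a throwaway in-memory stand-in might look like this (illustrative only):

import time

class DummyCache(object):
    """In-memory stand-in with a Django-style get/set interface."""

    def __init__(self):
        self._data = {}

    def get(self, key):
        entry = self._data.get(key)
        if entry is None:
            return None
        value, expires_at = entry
        if expires_at is not None and time.time() > expires_at:
            # Entry has expired; behave as a cache miss
            del self._data[key]
            return None
        return value

    def set(self, key, value, timeout=None):
        expires_at = time.time() + timeout if timeout else None
        self._data[key] = (value, expires_at)

cache = DummyCache()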
As a bonus, if you don't want your test or development environment to rely on Redis, you can replace it with a no-op lock:
import contextlib

@contextlib.contextmanager
def Lock(*args, **kwargs):
    yield
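How you switch between the two implementations is up to you; one sketch, keyed off a hypothetical environment variable (the module paths below are placeholders for wherever the two Lock definitions live in your project):

import os

# Hypothetical wiring: use the Redis-backed Lock when USE_REDIS_LOCKS is
# set, and the no-op contextmanager otherwise.
if os.environ.get('USE_REDIS_LOCKS'):
    from myapp.locking import Lock        # the SETNX/GETSET class above
else:
    from myapp.locking_noop import Lock   # the no-op Lock above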