The functools module provides powerful caching decorators that can dramatically speed up expensive or recursive functions through memoization.
Basic lru_cache
from functools import lru_cache

@lru_cache(maxsize=128)
def expensive_computation(n: int) -> int:
    """Sum of squares below n; results are memoized per distinct n."""
    print(f"Computing {n}...")
    return sum(i * i for i in range(n))

# First call computes
result1 = expensive_computation(1000)  # "Computing 1000..."
# Second call returns the cached result without re-running the body
result2 = expensive_computation(1000)  # No output (cached)
print(result1 == result2)  # True

Unbounded Cache
from functools import cache  # Python 3.9+

@cache
def factorial(n: int) -> int:
    """Recursive factorial; every intermediate n! is memoized."""
    if n <= 1:
        return 1
    return n * factorial(n - 1)

# @cache is the same as @lru_cache(maxsize=None): unbounded, no LRU bookkeeping
factorial(100)  # All intermediate results cached

Recursive Fibonacci
from functools import lru_cache

# Without cache: O(2^n) - each call re-computes both subtrees
def fib_slow(n):
    if n < 2:
        return n
    return fib_slow(n - 1) + fib_slow(n - 2)

# With cache: O(n) - each value of n is computed exactly once
@lru_cache(maxsize=None)
def fib(n):
    if n < 2:
        return n
    return fib(n - 1) + fib(n - 2)

# fib_slow(35) takes seconds
# fib(35) is instant
print(fib(100))  # 354224848179261915075

Cache Statistics
from functools import lru_cache

@lru_cache(maxsize=32)
def compute(x: int) -> int:
    """Square x; cheap stand-in for an expensive call."""
    return x * x

# Exercise the cache: 50 calls, but only 10 distinct arguments.
for i in range(50):
    compute(i % 10)  # Reuse some values

info = compute.cache_info()
print(f"Hits: {info.hits}")      # Cache hits (40 here)
print(f"Misses: {info.misses}")  # Cache misses (10 here)
print(f"Size: {info.currsize}")  # Current cache size
print(f"Max: {info.maxsize}")  # Maximum size

Clearing Cache
from functools import lru_cache

@lru_cache(maxsize=100)
def get_user(user_id: int) -> dict:
    """Fetch a user record (stands in for an expensive database call)."""
    return {"id": user_id, "name": f"User {user_id}"}

# Use cached version
get_user(1)
get_user(2)

# Clear entire cache (e.g., after database update)
get_user.cache_clear()
# Or for specific key - not directly supported
# Workaround: wrap in class with a custom clear

The typed Parameter
from functools import lru_cache
# Without typed: 1 and 1.0 are same key
@lru_cache(maxsize=100)
def compute_untyped(x):
print(f"Computing {x} (type: {type(x).__name__})")
return x * 2
compute_untyped(1) # Computing 1 (type: int)
compute_untyped(1.0) # No output - uses cached int result!
# With typed: 1 and 1.0 are different keys
@lru_cache(maxsize=100, typed=True)
def compute_typed(x):
print(f"Computing {x} (type: {type(x).__name__})")
return x * 2
compute_typed(1) # Computing 1 (type: int)
compute_typed(1.0)  # Computing 1.0 (type: float)

Caching Class Methods
from functools import lru_cache

class DataProcessor:
    def __init__(self, multiplier: int):
        self.multiplier = multiplier

    # NOTE: lru_cache on a method includes `self` in every cache key, so
    # results stay correct per instance -- but the single shared cache holds
    # strong references to every instance, keeping them alive for the cache's
    # lifetime (memory-leak risk; linters flag this as B019).
    @lru_cache(maxsize=100)
    def process(self, value: int) -> int:
        print(f"Processing {value}")
        return value * self.multiplier

# Warning: one cache is shared across instances (and pins them in memory)!
p1 = DataProcessor(2)
p2 = DataProcessor(3)
p1.process(10)  # Processing 10 -> 20
p2.process(10)  # Processing 10 -> 30 (its own entry: `self` is part of the cache key; the real problem is the shared cache keeps both instances alive)

Per-Instance Caching
from functools import lru_cache

class DataProcessor:
    def __init__(self, multiplier: int):
        self.multiplier = multiplier
        # Create an instance-specific cached method: the cache lives on the
        # instance, so it is garbage-collected together with the instance.
        self.process = lru_cache(maxsize=100)(self._process)

    def _process(self, value: int) -> int:
        print(f"Processing {value} with multiplier {self.multiplier}")
        return value * self.multiplier

p1 = DataProcessor(2)
p2 = DataProcessor(3)
p1.process(10)  # Processing 10 with multiplier 2 -> 20
p2.process(10)  # Processing 10 with multiplier 3 -> 30

cached_property (Python 3.8+)
from functools import cached_property

class DataAnalyzer:
    def __init__(self, data: list):
        self.data = data

    @cached_property
    def statistics(self) -> dict:
        """Computed once on first access, then stored on the instance."""
        print("Computing statistics...")
        return {
            "mean": sum(self.data) / len(self.data),
            "min": min(self.data),
            "max": max(self.data),
        }

analyzer = DataAnalyzer([1, 2, 3, 4, 5])
print(analyzer.statistics)  # "Computing statistics..." + result
print(analyzer.statistics)  # Just result (cached)

# Clear the cached value: deleting the instance attribute forces a recompute
del analyzer.statistics
print(analyzer.statistics)  # Recomputes

Custom Cache Key
from functools import lru_cache, wraps

def make_hashable(obj):
    """Recursively convert unhashable containers (dict/list) to tuples."""
    if isinstance(obj, dict):
        return tuple(sorted((k, make_hashable(v)) for k, v in obj.items()))
    if isinstance(obj, list):
        return tuple(make_hashable(x) for x in obj)
    return obj

def cached_with_dict_args(func):
    """Wrapper to allow dict arguments with lru_cache.

    Limitation: only top-level dict arguments are converted back to dicts
    before calling *func*; nested dicts/lists reach it as tuples, and a
    genuine tuple argument would be mistaken for a converted dict.
    """
    @lru_cache(maxsize=128)
    def cached_func(*args):
        return func(*[dict(a) if isinstance(a, tuple) else a for a in args])

    @wraps(func)
    def wrapper(*args):
        hashable_args = tuple(make_hashable(a) for a in args)
        return cached_func(*hashable_args)

    # Expose the underlying cache controls on the public wrapper.
    wrapper.cache_clear = cached_func.cache_clear
    wrapper.cache_info = cached_func.cache_info
    return wrapper

@cached_with_dict_args
def process_config(config: dict) -> str:
    print("Processing...")
    return str(config)

process_config({"a": 1})  # Processing...
process_config({"a": 1})  # Cached

Time-Based Cache Expiration
from functools import lru_cache, wraps
import time

def timed_lru_cache(seconds: int, maxsize: int = 128):
    """LRU cache whose entire contents expire every *seconds* seconds.

    Note: expiration is global (the whole cache is cleared at once), not
    per entry. time.monotonic() is used so wall-clock adjustments (NTP,
    DST) cannot shrink or extend the window.
    """
    def decorator(func):
        func = lru_cache(maxsize=maxsize)(func)
        func.expiration = time.monotonic() + seconds

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Past the deadline: drop everything and start a new window.
            if time.monotonic() > func.expiration:
                func.cache_clear()
                func.expiration = time.monotonic() + seconds
            return func(*args, **kwargs)

        # Expose the cache controls on the public wrapper.
        wrapper.cache_clear = func.cache_clear
        wrapper.cache_info = func.cache_info
        return wrapper
    return decorator

@timed_lru_cache(seconds=60)
def get_data(key: str) -> str:
    print(f"Fetching {key}...")
    return f"data_{key}"

get_data("test")  # Fetching test...
get_data("test")  # Cached for 60 seconds

Async Caching
from functools import wraps
import asyncio

# lru_cache doesn't work directly with async functions: it would cache the
# coroutine object (awaitable once), not its result. Use a small wrapper
# like this, or a third-party library such as async-lru.
def async_lru_cache(maxsize=128):
    """True-LRU result cache for async functions (single event loop).

    On a hit the entry is re-inserted to mark it most recently used, so
    eviction removes the least recently *used* entry, not merely the oldest.
    """
    cache = {}

    def decorator(func):
        @wraps(func)
        async def wrapper(*args):
            if args in cache:
                # Refresh recency: move the entry to the end of the dict.
                cache[args] = cache.pop(args)
                return cache[args]
            result = await func(*args)
            if len(cache) >= maxsize:
                # Evict the least recently used entry (first in dict order).
                cache.pop(next(iter(cache)))
            cache[args] = result
            return result

        wrapper.cache_clear = cache.clear
        return wrapper
    return decorator

@async_lru_cache(maxsize=100)
async def fetch_data(url: str) -> str:
    print(f"Fetching {url}")
    await asyncio.sleep(0.1)  # Simulate network
    return f"data from {url}"
Best Practices

from functools import lru_cache
# 1. Only cache pure functions (same input = same output, no side effects)
@lru_cache
def pure_function(x: int) -> int:
    return x * 2  # Always same result for same input

# 2. Arguments must be hashable
# @lru_cache
# def bad(items: list):  # TypeError: unhashable type: 'list'
#     pass

# 3. Set an appropriate maxsize (an unbounded cache over an unbounded key
#    set grows forever -- effectively a memory leak)
@lru_cache(maxsize=1000)  # For many unique inputs
def large_cache(x):
    pass

@lru_cache(maxsize=32)  # For few unique inputs
def small_cache(x):
    pass

# 4. Monitor cache effectiveness
print(my_func.cache_info())

Caching transforms O(n) or O(2^n) problems into O(1) lookups. Use it for expensive computations, recursive algorithms, and repeated calculations.
React to this post: