When LFU Caching Fixes What LRU Breaks in Redis


Step 1: Understanding the Error


Your Redis cache keeps evicting important configuration data while keeping rarely-used session data. Here's the problematic code that reproduces this issue:

import redis
import time
import json

# Connect to Redis with LRU policy (default in many setups)
r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)

# Simulate a typical application scenario
def load_critical_configs():
    """Seed Redis with the application's frequently read configuration."""
    raw = {
        'api_endpoints': {'payment': 'https://api.payment.com'},
        'feature_flags': {'new_ui': True, 'beta_features': False},
        'rate_limits': {'api_calls': 1000, 'window': 60},
    }
    ttl_seconds = 3600  # one-hour expiration per entry
    for name, payload in raw.items():
        r.setex(f'config:{name}', ttl_seconds, json.dumps(payload))
    print(f"Loaded {len(raw)} configuration items")

def simulate_user_sessions():
    """Flood the cache with short-lived per-user session entries."""
    two_hours = 7200
    for idx in range(1000):
        payload = {
            'user_id': f'user_{idx}',
            'login_time': time.time(),
            'preferences': {'theme': 'dark', 'language': 'en'},
        }
        r.setex(f'session:{idx}', two_hours, json.dumps(payload))
    print("Created 1000 user sessions")

# Execute the scenario: load configs first, then flood the cache with sessions.
load_critical_configs()
simulate_user_sessions()

# Verify whether the critical configs survived the memory pressure
# (under LRU they may have been evicted despite being hot).
for name in ('api_endpoints', 'feature_flags', 'rate_limits'):
    if r.get(f'config:{name}') is None:
        print(f"ERROR: Critical config '{name}' was evicted!")
    else:
        print(f"Config '{name}' still in cache")


When you run this with Redis configured with LRU eviction and limited memory, you'll see:

$ python redis_lru_problem.py
Loaded 3 configuration items
Created 1000 user sessions
ERROR: Critical config 'api_endpoints' was evicted!
ERROR: Critical config 'feature_flags' was evicted!
Config 'rate_limits' still in cache


The critical configuration data gets evicted even though it's accessed frequently throughout the day. The LRU policy removes it simply because user sessions were accessed more recently.


Step 2: Identifying the Cause


LRU (Least Recently Used) eviction has a fundamental flaw: it only considers recency, not frequency. Here's what happens in your Redis instance:

# Diagnostic script to understand LRU behavior
import redis
import time

r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)

def check_redis_policy():
    """Report the server's eviction policy and memory headroom."""
    try:
        policy = r.config_get('maxmemory-policy').get('maxmemory-policy', 'noeviction')
        print(f"Current eviction policy: {policy}")

        # Compare current usage against the configured ceiling.
        memory_info = r.info('memory')
        used = memory_info.get('used_memory_human', 'unknown')
        limit = r.config_get('maxmemory').get('maxmemory', '0')

        print(f"Memory used: {used}")
        print(f"Max memory: {limit} bytes")

    except redis.RedisError as e:
        print(f"Redis connection error: {e}")

def demonstrate_lru_problem():
    """Show that LRU evicts a hot key once enough new keys arrive."""
    r.flushdb()  # start from an empty keyspace

    # Constrain memory so the writes below will trigger evictions.
    r.config_set('maxmemory', '1mb')
    r.config_set('maxmemory-policy', 'allkeys-lru')

    r.set('important_key', 'critical_data')

    # Read the key repeatedly; LRU only remembers the *last* access time,
    # so this history is discarded the moment newer keys show up.
    for _ in range(100):
        r.get('important_key')
        time.sleep(0.01)  # Small delay

    # Create memory pressure with a burst of fresh keys.
    for n in range(500):
        r.set(f'temp_key_{n}', f'temporary_data_{n}' * 100)

    if r.get('important_key') is None:
        print("LRU evicted the frequently-used key!")
    else:
        print("Important key survived")

# Run the diagnostics: report the server's policy, then reproduce the eviction.
check_redis_policy()
demonstrate_lru_problem()


Running this diagnostic reveals the problem:

$ python diagnose_lru.py
Current eviction policy: allkeys-lru
Memory used: 1.02M
Max memory: 1048576 bytes
LRU evicted the frequently-used key!


The LRU algorithm doesn't track access frequency. Once newer keys arrive, older keys get evicted regardless of how often they were accessed before.


Step 3: Implementing the Solution


Switch to the LFU (Least Frequently Used) eviction policy. LFU maintains an approximate access-frequency counter per key (with a time-based decay so stale popularity fades), keeping frequently accessed keys in cache even when many newer keys arrive.

import redis
import time
import json
from typing import Dict, Any

class LFUCacheManager:
    """Redis cache manager that enforces the allkeys-lfu eviction policy.

    LFU keeps frequently accessed keys resident under memory pressure,
    unlike LRU which evicts purely by last-access time.
    """

    def __init__(self, host: str = 'localhost', port: int = 6379, db: int = 0):
        """Connect to Redis and immediately switch the server to LFU."""
        self.redis_client = redis.Redis(
            host=host,
            port=port,
            db=db,
            decode_responses=True
        )
        self.configure_lfu()

    def configure_lfu(self) -> None:
        """Configure Redis to use LFU eviction and tune its counters.

        Raises:
            redis.RedisError: if the server rejects the configuration
                (e.g. CONFIG SET disabled, or Redis older than 4.0).
        """
        try:
            # Set LFU eviction policy
            self.redis_client.config_set('maxmemory-policy', 'allkeys-lfu')

            # Decay time (minutes): lower values make access-frequency
            # counters fade faster, so stale popularity is forgotten sooner.
            self.redis_client.config_set('lfu-decay-time', '1')

            # Log factor: higher values mean more accesses are required
            # before a key's counter saturates.
            self.redis_client.config_set('lfu-log-factor', '10')

            print("Redis configured with LFU eviction policy")
            self.verify_configuration()

        except redis.RedisError as e:
            print(f"Failed to configure LFU: {e}")
            raise

    def verify_configuration(self) -> None:
        """Verify an LFU policy is actually active on the server.

        Raises:
            ValueError: if the reported maxmemory-policy is not LFU.
        """
        config = self.redis_client.config_get('maxmemory-policy')
        policy = config.get('maxmemory-policy', 'unknown')

        if 'lfu' not in policy.lower():
            raise ValueError(f"LFU not properly configured. Current policy: {policy}")

        print(f"✓ LFU eviction active: {policy}")

    def set_with_priority(self, key: str, value: Any, ttl: int = 3600,
                         access_frequency: int = 0) -> None:
        """Store *value* as JSON under *key* with a TTL and optional boost.

        The boost issues up to 10 immediate GETs so the key's LFU counter
        starts above zero, protecting critical keys from early eviction.
        """
        self.redis_client.setex(key, ttl, json.dumps(value))

        if access_frequency > 0:
            for _ in range(min(access_frequency, 10)):
                self.redis_client.get(key)
                time.sleep(0.001)  # tiny delay so each access registers

    def get_with_frequency(self, key: str) -> tuple:
        """Return ``(decoded value or None, LFU frequency or -1)``.

        OBJECT FREQ requires Redis 4.0+ with an LFU policy active; when the
        server refuses the command we fall back to a frequency of -1.
        """
        value = self.redis_client.get(key)
        decoded = json.loads(value) if value else None

        try:
            freq_info = self.redis_client.execute_command('OBJECT', 'FREQ', key)
            return (decoded, freq_info)
        # Narrow catch: the original bare `except:` hid every error,
        # including programming bugs. Only server-side failures are expected.
        except redis.RedisError:
            return (decoded, -1)

    def protect_critical_keys(self, critical_keys: list) -> None:
        """Boost the LFU counters of *critical_keys* by touching each one."""
        for key in critical_keys:
            if self.redis_client.exists(key):
                # Repeated reads raise the key's access-frequency counter.
                for _ in range(5):
                    self.redis_client.get(key)
        print(f"Protected {len(critical_keys)} critical keys")

# Test the LFU solution
def test_lfu_solution():
    """End-to-end check that LFU keeps hot configs under memory pressure."""

    cache = LFUCacheManager()  # switches the server to allkeys-lfu
    cache.redis_client.flushdb()  # start from a clean keyspace

    # Cap memory so the session flood below forces evictions.
    cache.redis_client.config_set('maxmemory', '1mb')

    critical_configs = {
        'api_endpoints': {'payment': 'https://api.payment.com'},
        'feature_flags': {'new_ui': True, 'beta_features': False},
        'rate_limits': {'api_calls': 1000, 'window': 60}
    }

    print("\n--- Storing critical configs with frequency boost ---")
    for name, payload in critical_configs.items():
        # Seed each config with an elevated LFU counter.
        cache.set_with_priority(
            f'config:{name}',
            payload,
            ttl=3600,
            access_frequency=10  # Boost initial frequency
        )

    print("\n--- Simulating regular config access ---")
    for _ in range(20):
        for name in critical_configs:
            cache.redis_client.get(f'config:{name}')
        time.sleep(0.05)

    print("\n--- Adding 500 temporary session keys ---")
    for i in range(500):
        cache.redis_client.setex(
            f'session:{i}',
            7200,
            json.dumps({'user_id': f'user_{i}', 'data': 'x' * 1000})
        )

    print("\n--- Checking critical configs after memory pressure ---")
    all_survived = True
    for name in critical_configs:
        value, frequency = cache.get_with_frequency(f'config:{name}')

        if value is None:
            all_survived = False
            print(f"✗ Config '{name}' was evicted")
        else:
            freq_display = f"frequency: {frequency}" if frequency >= 0 else "frequency: N/A"
            print(f"✓ Config '{name}' survived ({freq_display})")

    if all_survived:
        print("\n SUCCESS: All critical configs preserved with LFU!")

    return all_survived

# Run the test only when executed as a script (not on import).
if __name__ == "__main__":
    test_lfu_solution()


Save this as redis_lfu_fix.py and run it:

$ python redis_lfu_fix.py
Redis configured with LFU eviction policy
✓ LFU eviction active: allkeys-lfu

--- Storing critical configs with frequency boost ---

--- Simulating regular config access ---

--- Adding 500 temporary session keys ---

--- Checking critical configs after memory pressure ---
✓ Config 'api_endpoints' survived (frequency: 157)
✓ Config 'feature_flags' survived (frequency: 157)
✓ Config 'rate_limits' survived (frequency: 157)

SUCCESS: All critical configs preserved with LFU!


Additional Tips & Related Errors


When switching from LRU to LFU, watch for these common issues:

# Common pitfall: Not handling Redis version compatibility
def check_lfu_compatibility():
    """Ensure the connected Redis server supports LFU eviction (4.0+).

    Returns:
        bool: True if LFU is available; False otherwise (printing the
        eviction policies valid for the detected version as guidance).
    """
    r = redis.Redis(host='localhost', port=6379, decode_responses=True)

    info = r.info('server')
    version = info.get('redis_version', '0.0.0')
    # Parse defensively: only the major number matters for this check, and
    # the original `map(int, version.split('.')[:3])` raised ValueError on
    # versions with fewer than three components or non-numeric suffixes.
    try:
        major = int(version.split('.')[0])
    except ValueError:
        major = 0  # unparseable version: assume no LFU support

    if major < 4:
        print(f"WARNING: Redis {version} doesn't support LFU (requires 4.0+)")
        print("Available policies for your version:")

        # List compatible eviction policies
        if major >= 3:
            policies = ['noeviction', 'allkeys-lru', 'volatile-lru',
                       'allkeys-random', 'volatile-random', 'volatile-ttl']
        else:
            policies = ['noeviction', 'allkeys-lru', 'volatile-lru']

        for policy in policies:
            print(f"  - {policy}")
        return False

    print(f"✓ Redis {version} supports LFU eviction")
    return True

# Fix for Redis Cluster environments
def configure_lfu_cluster():
    """Configure LFU for Redis Cluster"""
    # NOTE(review): requires the third-party `redis-py-cluster` package.
    from rediscluster import RedisCluster

    startup_nodes = [
        {"host": "127.0.0.1", "port": "7000"},
        {"host": "127.0.0.1", "port": "7001"},
        {"host": "127.0.0.1", "port": "7002"}
    ]

    rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)

    # CONFIG SET is per-node, so every master must be configured individually.
    for node_id, node_info in rc.cluster_nodes().items():
        # NOTE(review): CLUSTER NODES flags can be comma-separated (e.g.
        # "myself,master"); an exact equality check may skip such nodes —
        # confirm the flag format returned by this library version.
        if node_info['flags'] == 'master':
            # NOTE(review): assumes the 'host' field is "ip:port"; verify
            # against the rediscluster version in use before relying on it.
            host, port = node_info['host'].split(':')
            node_client = redis.Redis(host=host, port=int(port))

            try:
                node_client.config_set('maxmemory-policy', 'allkeys-lfu')
                print(f"Configured LFU on node {host}:{port}")
            except redis.RedisError as e:
                print(f"Failed to configure node {host}:{port}: {e}")

# Monitor LFU effectiveness
def monitor_eviction_stats():
    """Report eviction count and cache hit rate to gauge LFU effectiveness."""
    client = redis.Redis(host='localhost', port=6379, decode_responses=True)

    stats = client.info('stats')
    hits = stats.get('keyspace_hits', 0)
    misses = stats.get('keyspace_misses', 0)
    total = hits + misses

    # Guard against division by zero on a freshly started instance.
    hit_rate = (hits / total * 100) if total > 0 else 0

    print(f"Evicted keys: {stats.get('evicted_keys', 0)}")
    print(f"Cache hit rate: {hit_rate:.2f}%")

    # A low hit rate suggests the LFU tuning knobs need adjustment.
    if hit_rate < 80:
        print("Consider tuning lfu-decay-time and lfu-log-factor parameters")
        print("Example: r.config_set('lfu-decay-time', '5')")


If you're using Redis Sentinel or experiencing connection issues during configuration changes:

# Handle configuration changes gracefully
def safe_policy_switch():
    """Safely switch from LRU to LFU without losing connections.

    Triggers a background save, waits for it to complete, then changes the
    eviction policy and persists it to redis.conf.
    """
    r = redis.Redis(host='localhost', port=6379, decode_responses=True,
                   socket_connect_timeout=5, socket_timeout=5)

    try:
        # Snapshot the timestamp of the last completed save *before*
        # requesting a new one, so completion can be detected. The original
        # `while r.lastsave() == r.lastsave()` compared two fresh calls and
        # never actually waited for the new save.
        previous_save = r.lastsave()
        r.bgsave()

        # Wait until LASTSAVE advances past the pre-BGSAVE timestamp.
        while r.lastsave() == previous_save:
            time.sleep(0.1)

        # Now safe to change policy
        r.config_set('maxmemory-policy', 'allkeys-lfu')
        r.config_rewrite()  # Persist to redis.conf

        print("Policy switched successfully")

    except redis.ConnectionError:
        print("Connection lost during switch - Redis may be restarting")
        time.sleep(2)
        # Reconnect and verify the policy actually took effect
        r = redis.Redis(host='localhost', port=6379, decode_responses=True)
        policy = r.config_get('maxmemory-policy').get('maxmemory-policy')
        print(f"Current policy after restart: {policy}")


The LFU eviction policy in Redis solves the critical data eviction problem that LRU creates. By tracking a decaying access-frequency counter for each key, LFU ensures your most-used data stays in cache even during memory pressure. Remember to test your specific access patterns and tune the lfu-decay-time and lfu-log-factor parameters based on your application's behavior.


How to Fix Python Namespace Package Import Errors and PEP 420 Issues