Good logging is essential for debugging and monitoring. Here's how to use Python's logging module effectively.
Basic Setup
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info("Application started")
logger.warning("Low disk space")
logger.error("Database connection failed")
Log Levels
logger.debug("Detailed debug info") # 10
logger.info("General information") # 20
logger.warning("Something unexpected") # 30
logger.error("Something failed") # 40
logger.critical("System is down") # 50
Set level to control what gets logged:
logging.basicConfig(level=logging.DEBUG) # Everything
logging.basicConfig(level=logging.WARNING) # Warnings and above
Proper Configuration
Don't use basicConfig in libraries. Configure in your application entry point:
# config.py
import logging
def setup_logging():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S"
)
# main.py
from config import setup_logging
if __name__ == "__main__":
setup_logging()
# ... rest of app
Format Strings
Common format attributes:
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Available attributes:
# %(asctime)s - Timestamp
# %(name)s - Logger name
# %(levelname)s - DEBUG, INFO, etc.
# %(message)s - The log message
# %(filename)s - Source file
# %(lineno)d - Line number
# %(funcName)s - Function name
# %(pathname)s - Full path
Example output:
2026-03-21 10:30:45 - myapp.users - INFO - User created: alice@example.com
Handlers
Send logs to different destinations:
import logging
from logging.handlers import RotatingFileHandler
logger = logging.getLogger("myapp")
logger.setLevel(logging.DEBUG)
# Console handler
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# File handler with rotation
file_handler = RotatingFileHandler(
"app.log",
maxBytes=10_000_000, # 10 MB
backupCount=5
)
file_handler.setLevel(logging.DEBUG)
# Different formats per handler
console_format = logging.Formatter("%(levelname)s - %(message)s")
file_format = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
console.setFormatter(console_format)
file_handler.setFormatter(file_format)
logger.addHandler(console)
logger.addHandler(file_handler)
Structured Logging
For production, use structured logs (JSON):
import logging
import json
class JSONFormatter(logging.Formatter):
def format(self, record):
log_data = {
"timestamp": self.formatTime(record),
"level": record.levelname,
"logger": record.name,
"message": record.getMessage(),
}
if record.exc_info:
log_data["exception"] = self.formatException(record.exc_info)
return json.dumps(log_data)
# Usage
handler = logging.StreamHandler()
handler.setFormatter(JSONFormatter())
logger.addHandler(handler)
logger.info("User logged in", extra={"user_id": 123})
Note that the JSONFormatter above only emits its fixed fields; extend its format method to include extra attributes like user_id if you need them in the output. Or use python-json-logger:
from pythonjsonlogger import jsonlogger
handler = logging.StreamHandler()
handler.setFormatter(jsonlogger.JsonFormatter())
Adding Context
Include extra data in logs:
# Using extra parameter
logger.info("Order placed", extra={"order_id": "ABC123", "total": 99.99})
# Using LoggerAdapter
class ContextAdapter(logging.LoggerAdapter):
def process(self, msg, kwargs):
return f"[{self.extra['request_id']}] {msg}", kwargs
logger = ContextAdapter(logging.getLogger(__name__), {"request_id": "req-123"})
logger.info("Processing request") # [req-123] Processing request
Configuration with dictConfig
For complex setups, use dictionary configuration:
import logging.config
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"formatter": "standard",
},
"file": {
"class": "logging.handlers.RotatingFileHandler",
"level": "DEBUG",
"formatter": "standard",
"filename": "app.log",
"maxBytes": 10000000,
"backupCount": 5,
},
},
"loggers": {
"": { # Root logger
"handlers": ["console", "file"],
"level": "DEBUG",
},
"urllib3": {
"level": "WARNING", # Quiet noisy library
},
},
}
logging.config.dictConfig(LOGGING_CONFIG)
Best Practices
1. Use __name__ for logger names:
logger = logging.getLogger(__name__)
2. Don't format strings yourself:
# Bad - always formats
logger.debug("User %s logged in" % username)
# Good - only formats if level matches
logger.debug("User %s logged in", username)
3. Include relevant context:
# Not helpful
logger.error("Operation failed")
# Helpful
logger.error("Failed to process order %s: %s", order_id, error)
4. Use exception logging:
try:
risky_operation()
except Exception:
logger.exception("Operation failed") # Includes traceback
5. Silence noisy libraries:
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("sqlalchemy.engine").setLevel(logging.WARNING)
Quick Reference
import logging
# Get logger
logger = logging.getLogger(__name__)
# Log at different levels
logger.debug("Debug message")
logger.info("Info message")
logger.warning("Warning message")
logger.error("Error message")
logger.critical("Critical message")
logger.exception("Error with traceback") # Use in except block
# Include variables
logger.info("User %s performed %s", user_id, action)
# Extra data
logger.info("Event", extra={"user_id": 123})
Good logs make debugging production issues possible. Invest in your logging setup early.