Good tests make refactoring safe and bugs obvious. Here are the patterns I rely on.
pytest Basics
pytest discovers and runs tests with minimal ceremony:
pip install pytest

# test_calculator.py
def test_addition():
assert 2 + 2 == 4
def test_subtraction():
assert 5 - 3 == 2

pytest # Run all tests
pytest -v # Verbose output
pytest -x # Stop on first failure
pytest -k "addition" # Run tests matching pattern

Test discovery rules:
- Files: test_*.py or *_test.py
- Functions: test_*
- Classes: Test* (no __init__)
Fixtures: Reusable Setup
Fixtures provide test dependencies via injection:
import pytest
@pytest.fixture
def user():
return {"id": 1, "name": "Alice", "email": "alice@example.com"}
@pytest.fixture
def admin(user):
return {**user, "role": "admin"}
def test_user_has_email(user):
assert "@" in user["email"]
def test_admin_inherits_user_fields(admin):
assert admin["name"] == "Alice"
assert admin["role"] == "admin"

Fixture Scope
Control fixture lifetime:
@pytest.fixture(scope="function") # Default: fresh per test
def per_test_connection():
return create_connection()
@pytest.fixture(scope="module") # Once per file
def shared_client():
return APIClient()
@pytest.fixture(scope="session") # Once per test run
def database():
return setup_database()

Cleanup with yield
@pytest.fixture
def temp_directory():
path = Path(tempfile.mkdtemp())
yield path
shutil.rmtree(path) # Cleanup after test
@pytest.fixture
def database_transaction(db):
tx = db.begin()
yield tx
tx.rollback() # Always rollback, keep db clean

conftest.py
Share fixtures across files:
# tests/conftest.py
import pytest
from myapp import create_app, db
@pytest.fixture(scope="session")
def app():
return create_app(testing=True)
@pytest.fixture
def client(app):
return app.test_client()

All tests inherit fixtures from conftest.py in their directory and parent directories.
Mocking with unittest.mock
Mock external dependencies to isolate units:
from unittest.mock import Mock, patch, MagicMock
# Simple mock
def test_user_service():
repo = Mock()
repo.find_by_id.return_value = {"id": 1, "name": "Alice"}
service = UserService(repo)
user = service.get_user(1)
assert user["name"] == "Alice"
repo.find_by_id.assert_called_once_with(1)

patch Decorator
from unittest.mock import patch
@patch("myapp.services.requests.get")
def test_fetch_data(mock_get):
mock_get.return_value.json.return_value = {"status": "ok"}
result = fetch_external_data()
assert result == {"status": "ok"}
mock_get.assert_called_once()

patch Context Manager
def test_time_dependent_code():
with patch("myapp.time.time") as mock_time:
mock_time.return_value = 1000000000
result = get_timestamp()
assert result == 1000000000

patch.object for Methods
def test_instance_method():
user = User("Alice")
with patch.object(user, "send_email") as mock_send:
user.welcome()
mock_send.assert_called_once_with("Welcome, Alice!")

MagicMock for Complex Objects
def test_file_processing():
mock_file = MagicMock()
mock_file.read.return_value = "file contents"
mock_file.__enter__.return_value = mock_file
with patch("builtins.open", return_value=mock_file):
result = process_file("data.txt")
assert result == "processed: file contents"

Mock Side Effects
def test_retry_logic():
mock_api = Mock()
# Fail twice, then succeed
mock_api.call.side_effect = [
ConnectionError("Failed"),
ConnectionError("Failed again"),
{"status": "success"}
]
result = retry_api_call(mock_api, max_retries=3)
assert result == {"status": "success"}
assert mock_api.call.call_count == 3

Parametrized Tests
Test multiple scenarios without repetition:
import pytest
@pytest.mark.parametrize("input,expected", [
("hello", 5),
("", 0),
("world", 5),
("hello world", 11),
])
def test_string_length(input, expected):
assert len(input) == expected

Multiple Parameters
@pytest.mark.parametrize("a,b,expected", [
(2, 3, 5),
(-1, 1, 0),
(0, 0, 0),
(100, -100, 0),
])
def test_add(a, b, expected):
assert add(a, b) == expected

Parametrize with IDs
@pytest.mark.parametrize("email,valid", [
("user@example.com", True),
("invalid-email", False),
("", False),
("user@.com", False),
], ids=["valid", "no-at", "empty", "no-domain"])
def test_email_validation(email, valid):
assert is_valid_email(email) == valid

Combining Parametrize
@pytest.mark.parametrize("x", [0, 1, 2])
@pytest.mark.parametrize("y", [10, 20])
def test_combinations(x, y):
# Runs 6 times: (0,10), (0,20), (1,10), (1,20), (2,10), (2,20)
assert x + y >= 10

Test Organization Patterns
Directory Structure
tests/
├── conftest.py # Shared fixtures
├── unit/
│ ├── test_models.py
│ ├── test_services.py
│ └── test_utils.py
├── integration/
│ ├── conftest.py # Integration-specific fixtures
│ ├── test_api.py
│ └── test_database.py
└── e2e/
└── test_workflows.py
Test Classes for Grouping
class TestUserCreation:
def test_creates_with_valid_data(self):
user = User.create(name="Alice", email="alice@example.com")
assert user.id is not None
def test_fails_without_email(self):
with pytest.raises(ValidationError):
User.create(name="Alice")
def test_normalizes_email_to_lowercase(self):
user = User.create(name="Alice", email="ALICE@Example.COM")
assert user.email == "alice@example.com"

Markers for Test Categories
# pytest.ini or pyproject.toml
# [pytest]
# markers =
# slow: marks tests as slow
# integration: requires external services
import pytest
@pytest.mark.slow
def test_large_file_processing():
...
@pytest.mark.integration
def test_database_connection():
...

pytest -m "not slow" # Skip slow tests
pytest -m integration # Only integration tests

Common Testing Pitfalls
1. Testing Implementation, Not Behavior
Bad:
def test_user_creation():
user = User("Alice")
assert user._name == "Alice" # Testing private attribute
assert user._created_at is not None

Good:
def test_user_creation():
user = User("Alice")
assert user.name == "Alice" # Test public interface
assert user.is_new()

2. Shared Mutable State
Bad:
users = [] # Shared between tests!
def test_add_user():
users.append(User("Alice"))
assert len(users) == 1
def test_add_another_user():
users.append(User("Bob"))
assert len(users) == 1 # FAILS! users has 2 now

Good:
@pytest.fixture
def users():
return [] # Fresh list per test
def test_add_user(users):
users.append(User("Alice"))
assert len(users) == 1

3. Over-Mocking
Bad:
def test_calculate_total():
# Mocking the function we're testing!
with patch("myapp.calculate_total") as mock:
mock.return_value = 100
assert calculate_total([50, 50]) == 100 # Useless test

Good:
def test_calculate_total():
# Test actual behavior, only mock external dependencies
assert calculate_total([50, 50]) == 100

4. Ignoring Edge Cases
Incomplete:
def test_divide():
assert divide(10, 2) == 5

Better:
@pytest.mark.parametrize("a,b,expected", [
(10, 2, 5),
(0, 5, 0),
(-10, 2, -5),
(10, -2, -5),
])
def test_divide(a, b, expected):
assert divide(a, b) == expected
def test_divide_by_zero():
with pytest.raises(ZeroDivisionError):
divide(10, 0)

5. Brittle Assertions
Brittle:
def test_api_response():
response = api.get_user(1)
assert response == {
"id": 1,
"name": "Alice",
"email": "alice@example.com",
"created_at": "2026-01-01T00:00:00Z",
"updated_at": "2026-01-02T00:00:00Z",
}

Robust:
def test_api_response():
response = api.get_user(1)
assert response["id"] == 1
assert response["name"] == "Alice"
assert "created_at" in response

6. Missing Exception Details
Incomplete:
def test_invalid_input():
with pytest.raises(ValueError):
process("")

Better:
def test_invalid_input():
with pytest.raises(ValueError, match="cannot be empty"):
process("")

7. Slow Tests
Slow:
def test_timeout_behavior():
time.sleep(30) # Actually waits 30 seconds
...

Fast:
def test_timeout_behavior():
with patch("myapp.time.sleep"): # Mock sleep
with patch("myapp.time.time") as mock_time:
mock_time.side_effect = [0, 31] # Simulate time passing
result = check_timeout(30)
assert result is True

8. Test Interdependence
Bad:
def test_create_user():
global user_id
user_id = create_user("Alice")
assert user_id is not None
def test_delete_user():
delete_user(user_id) # Depends on test_create_user running first!

Good:
@pytest.fixture
def user_id():
id = create_user("Alice")
yield id
delete_user(id) # Cleanup
def test_user_exists(user_id):
assert get_user(user_id) is not None
def test_delete_user(user_id):
delete_user(user_id)
assert get_user(user_id) is None

Quick Reference
# Fixtures
@pytest.fixture(scope="function")
def thing():
return create()
@pytest.fixture
def thing_with_cleanup():
x = create()
yield x
cleanup(x)
# Parametrize
@pytest.mark.parametrize("a,b", [(1, 2), (3, 4)])
def test_something(a, b): ...
# Markers
@pytest.mark.skip(reason="...")
@pytest.mark.skipif(condition, reason="...")
@pytest.mark.xfail(reason="...")
# Exceptions
with pytest.raises(ValueError, match="pattern"):
failing_code()
# Mocking
with patch("module.function") as mock:
mock.return_value = value
mock.side_effect = [a, b, c]
mock.side_effect = Exception("boom")

Write tests that fail for the right reasons and pass for the right reasons. That's the goal.