CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-locust

Developer-friendly load testing framework for HTTP and other protocols with distributed testing capabilities.

Pending
Overview
Eval results
Files

docs/debugging.md

Debugging and Utilities

Locust provides debugging tools and utilities for development-time testing, task analysis, and troubleshooting load test scenarios.

Capabilities

Single User Debugging

Function for running a single user instance to debug test scenarios without full load test execution.

from locust.debug import run_single_user

def run_single_user(user_class, include_length=False, include_time=False, 
                   include_context=False, include_payload=False, loglevel="WARNING"):
    """
    Run a single user instance for debugging purposes.

    Executes one user instance to test task behavior, request patterns,
    and response handling without starting a full load test. Per-request
    details are printed to stdout; the optional flags add extra columns
    to that output.

    Args:
        user_class: User class to run
        include_length (bool): Include response length in output
        include_time (bool): Include timing information in output
        include_context (bool): Include request context information
        include_payload (bool): Include request/response payload data
        loglevel (str): Logging level ("DEBUG", "INFO", "WARNING", "ERROR")

    Returns:
        None

    Usage:
        run_single_user(MyHttpUser, include_time=True, loglevel="INFO")
    """

Task Ratio Analysis

Functions for analyzing and inspecting task execution ratios and distributions.

from locust.user.inspectuser import print_task_ratio, print_task_ratio_json, get_ratio

def print_task_ratio(user_classes, num_users, total):
    """
    Print task execution ratios for user classes.

    Displays the relative frequency and probability of task execution
    for analysis and debugging of task distribution. Output is written
    to stdout.

    Args:
        user_classes (list): List of User classes to analyze
        num_users (int): Total number of users to distribute
        total (bool): Whether to show total ratios across all users

    Returns:
        None
    """

def print_task_ratio_json(user_classes, num_users):
    """
    Print task execution ratios in JSON format.

    Outputs task ratio analysis as structured JSON data (to stdout)
    for programmatic processing and integration.

    Args:
        user_classes (list): List of User classes to analyze
        num_users (int): Total number of users to distribute

    Returns:
        None
    """

def get_ratio(user_classes, user_spawned, total):
    """
    Get task execution ratios as data structure.

    Returns task ratio analysis as a dictionary for programmatic
    inspection and processing (this is the data behind the two
    print_* helpers above).

    Args:
        user_classes (list): List of User classes to analyze
        user_spawned (dict): Dict mapping user class names to spawned counts
        total (bool): Whether to calculate total ratios across all users

    Returns:
        dict: Task ratio analysis data with nested task ratios
    """

Usage Examples

Basic Single User Debugging

from locust import HttpUser, task, between
from locust.debug import run_single_user

class DebugUser(HttpUser):
    """Example user whose lifecycle hooks and tasks print their progress."""

    wait_time = between(1, 3)
    host = "http://localhost:8000"

    def on_start(self):
        """Log in once before any tasks run."""
        print("User starting - performing login")
        credentials = {"username": "testuser", "password": "secret"}
        response = self.client.post("/login", json=credentials)
        print(f"Login response: {response.status_code}")

    @task(3)
    def browse_pages(self):
        """Visit a fixed set of pages, reporting each status code."""
        print("Browsing pages task")
        for page in ["/", "/about", "/products"]:
            result = self.client.get(page)
            print(f"Page {page}: {result.status_code}")

    @task(1)
    def api_call(self):
        """Hit the data API and report status plus body size."""
        print("API call task")
        result = self.client.get("/api/data")
        print(f"API response: {result.status_code}, Length: {len(result.content)}")

    def on_stop(self):
        """Log out when the user shuts down."""
        print("User stopping - performing logout")
        self.client.post("/logout")

if __name__ == "__main__":
    # Debug the user behavior
    print("=== Running Single User Debug ===")
    run_single_user(DebugUser, include_time=True, include_length=True, loglevel="INFO")

Advanced Single User Debugging

from locust import HttpUser, TaskSet, task, between
from locust.debug import run_single_user
import json
import time  # fix: test_endpoint_b() calls time.time() but time was never imported

class APITestSet(TaskSet):
    """TaskSet that authenticates once, then repeatedly probes two endpoints."""

    def on_start(self):
        """Authenticate and cache the bearer token for subsequent tasks."""
        print("Starting API test set")
        # Authenticate
        response = self.client.post("/auth", json={
            "client_id": "test_client",
            "client_secret": "test_secret"
        })
        # NOTE(review): if auth fails, token is None and the slice below raises
        # TypeError — ensure the /auth endpoint is reachable when debugging.
        self.token = response.json().get("access_token")
        print(f"Got auth token: {self.token[:20]}...")

    @task
    def test_endpoint_a(self):
        """GET endpoint A and dump the body when the status is not 200."""
        print("Testing endpoint A")
        headers = {"Authorization": f"Bearer {self.token}"}
        response = self.client.get("/api/endpoint-a", headers=headers)
        print(f"Endpoint A: {response.status_code}")

        # Debug response content
        if response.status_code != 200:
            print(f"Error response: {response.text}")

    @task
    def test_endpoint_b(self):
        """POST a timestamped payload to endpoint B and inspect the JSON reply."""
        print("Testing endpoint B with payload")
        headers = {"Authorization": f"Bearer {self.token}"}
        payload = {"test_data": "debug_value", "timestamp": time.time()}

        response = self.client.post("/api/endpoint-b",
                                    json=payload,
                                    headers=headers)
        print(f"Endpoint B: {response.status_code}")

        # Debug response validation
        try:
            result = response.json()
            if "status" in result:
                print(f"API Status: {result['status']}")
        except json.JSONDecodeError:
            print("Response is not valid JSON")

class DebugAPIUser(HttpUser):
    """User that drives APITestSet against the local API host."""

    wait_time = between(1, 2)
    host = "http://api.localhost:8000"
    tasks = [APITestSet]

if __name__ == "__main__":
    # Comprehensive debugging with all options
    print("=== Comprehensive API Debug ===")
    run_single_user(
        DebugAPIUser,
        include_length=True,
        include_time=True,
        include_context=True,
        include_payload=True,
        loglevel="DEBUG"
    )

Task Ratio Analysis Example

from locust import HttpUser, TaskSet, task, between
from locust.user.inspectuser import print_task_ratio, print_task_ratio_json, get_ratio

class ShoppingTaskSet(TaskSet):
    """Weighted shopping tasks: browsing dominates, cart adds are rare."""

    @task(5)
    def browse_products(self):
        self.client.get("/products")

    @task(3)
    def view_product(self):
        self.client.get("/product/123")

    @task(1)
    def add_to_cart(self):
        self.client.post("/cart/add")

class UserBehaviorA(HttpUser):
    """Shopper profile; selected three times as often as UserBehaviorB."""

    wait_time = between(1, 3)
    tasks = [ShoppingTaskSet]
    weight = 3  # 3x more likely to be selected

class UserBehaviorB(HttpUser):
    """Admin profile with two lightweight inline tasks."""

    wait_time = between(2, 5)
    weight = 1  # 1x likelihood

    @task(2)
    def admin_panel(self):
        self.client.get("/admin")

    @task(1)
    def reports(self):
        self.client.get("/reports")

if __name__ == "__main__":
    user_classes = [UserBehaviorA, UserBehaviorB]
    num_users = 100  # Total users to analyze

    print("=== Task Ratio Analysis ===")
    print_task_ratio(user_classes, num_users, total=True)

    print("\n=== Task Ratio JSON ===")
    print_task_ratio_json(user_classes, num_users)

    print("\n=== Programmatic Analysis ===")
    # Example spawn distribution matching the 3:1 class weights above
    user_spawned = {"UserBehaviorA": 75, "UserBehaviorB": 25}
    ratios = get_ratio(user_classes, user_spawned, total=True)

    # Walk the ratio data and report each class's weight and task split
    for cls_name, info in ratios.items():
        print(f"\nUser Class: {cls_name}")
        print(f"Selection Weight: {info.get('weight', 1)}")
        print("Tasks:")
        for task_name, task_ratio in info.get('tasks', {}).items():
            print(f"  {task_name}: {task_ratio:.2%}")

Development-Time Testing

from locust import HttpUser, task, between, events
from locust.debug import run_single_user
import json  # fix: test_api_endpoint() catches json.JSONDecodeError but json was never imported
import time

class DevelopmentUser(HttpUser):
    """User for fast, verbose development-time checks against a local server."""

    wait_time = between(0.5, 1)  # Fast for development
    host = "http://localhost:3000"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Track wall-clock start and request count for the summary in on_stop().
        self.start_time = time.time()
        self.request_count = 0

    def on_start(self):
        """Announce the run and verify the server responds before testing."""
        print("🚀 Starting development test")
        print(f"Target host: {self.host}")

        # Check if server is responding
        try:
            response = self.client.get("/health")
            if response.status_code == 200:
                print("✅ Server health check passed")
            else:
                print(f"⚠️  Server health check failed: {response.status_code}")
        except Exception as e:
            # Best-effort probe: report connection problems but keep running.
            print(f"❌ Server health check error: {e}")

    @task(3)
    def test_main_page(self):
        """Test main application page"""
        print("Testing main page...")
        start = time.time()

        response = self.client.get("/")
        duration = (time.time() - start) * 1000

        self.request_count += 1
        print(f"Main page: {response.status_code} ({duration:.1f}ms)")

        # Development-specific checks
        if duration > 2000:
            print("⚠️  Slow response detected!")

        if response.status_code != 200:
            print(f"❌ Unexpected status: {response.status_code}")
            print(f"Response: {response.text[:200]}...")

    @task(2)
    def test_api_endpoint(self):
        """Test API endpoint functionality"""
        print("Testing API endpoint...")

        # Test with different parameters
        test_params = [
            {"id": 1, "type": "user"},
            {"id": 2, "type": "admin"},
            {"id": 999, "type": "invalid"}  # Test edge case
        ]

        for params in test_params:
            response = self.client.get("/api/data", params=params)
            print(f"API test {params}: {response.status_code}")

            # Validate response structure in development
            if response.status_code == 200:
                try:
                    data = response.json()
                    required_fields = ["id", "status", "data"]
                    missing_fields = [f for f in required_fields if f not in data]
                    if missing_fields:
                        print(f"⚠️  Missing fields: {missing_fields}")
                except json.JSONDecodeError:
                    # fix: print was over-indented relative to the except block
                    print("❌ Invalid JSON response")

    @task(1)
    def test_form_submission(self):
        """Test form submission"""
        print("Testing form submission...")

        test_data = {
            "name": "Test User",
            "email": "test@example.com",
            "message": "Development test message"
        }

        response = self.client.post("/contact", json=test_data)
        print(f"Form submission: {response.status_code}")

        # Check for validation errors in development
        if response.status_code == 400:
            try:
                errors = response.json().get("errors", [])
                print(f"Validation errors: {errors}")
            except ValueError:  # narrowed from bare except: response.json() raises a ValueError subclass
                print("Could not parse validation errors")

    def on_stop(self):
        """Print a throughput summary for the debug session."""
        duration = time.time() - self.start_time
        print(f"\n📊 Development Test Summary:")
        print(f"Duration: {duration:.1f} seconds")
        print(f"Requests: {self.request_count}")
        print(f"Rate: {self.request_count/duration:.1f} req/sec")
        print("🏁 Development test completed")

if __name__ == "__main__":
    print("=== Development Testing Mode ===")
    run_single_user(
        DevelopmentUser,
        include_time=True,
        include_length=True,
        loglevel="INFO"
    )

Custom Debugging Tools

from locust import HttpUser, task, between
from locust.debug import run_single_user
import json
import time

class DebuggingMixin:
    """Mixin for adding debugging capabilities to users"""

    def debug_request(self, method, url, **kwargs):
        """Enhanced request method with debugging.

        Logs the outgoing request details, issues it via ``self.client``,
        then logs timing, status, body size, and a short preview of the
        response. Returns the response object unchanged.
        """
        print(f"🔍 {method.upper()} {url}")

        # Log request details
        if 'json' in kwargs:
            print(f"   Request JSON: {json.dumps(kwargs['json'], indent=2)}")
        if 'data' in kwargs:
            print(f"   Request Data: {kwargs['data']}")
        if 'headers' in kwargs:
            print(f"   Headers: {kwargs['headers']}")

        start_time = time.time()
        response = getattr(self.client, method.lower())(url, **kwargs)
        duration = (time.time() - start_time) * 1000

        # Log response details
        print(f"   ⏱️  {duration:.1f}ms | 📊 {response.status_code} | 📏 {len(response.content)} bytes")

        if response.status_code >= 400:
            print(f"   ❌ Error Response: {response.text[:200]}...")

        # Try to parse JSON response; fall back to a text preview.
        try:
            json_response = response.json()
            print(f"   📋 Response JSON: {json.dumps(json_response, indent=2)[:300]}...")
        except ValueError:  # narrowed from bare except: .json() raises a ValueError subclass on bad JSON
            print(f"   📄 Response Text: {response.text[:100]}...")

        return response

    def debug_wait(self, message="Waiting"):
        """Debug-friendly wait with message"""
        # wait_time may be a callable (locust's between()/constant()) or a plain number.
        wait_time = self.wait_time() if callable(self.wait_time) else self.wait_time
        print(f"⏳ {message} for {wait_time:.1f} seconds...")
        time.sleep(wait_time)

class DebugHTTPUser(DebuggingMixin, HttpUser):
    """HTTP User with debugging capabilities"""

    wait_time = between(1, 2)
    host = "http://localhost:8000"

    @task
    def debug_workflow(self):
        """Example workflow with debugging"""
        print("\n🎯 Starting debug workflow")

        # Step 1: Login
        print("Step 1: Login")
        credentials = {"username": "debug_user", "password": "debug_pass"}
        login_response = self.debug_request("post", "/login", json=credentials)

        if login_response.status_code == 200:
            token = login_response.json().get("token")
            auth_headers = {"Authorization": f"Bearer {token}"}

            # Step 2: Get user data
            print("Step 2: Get user data")
            self.debug_request("get", "/api/user/me", headers=auth_headers)

            # Step 3: Update profile
            print("Step 3: Update profile")
            self.debug_request(
                "put",
                "/api/user/profile",
                json={"name": "Debug User Updated"},
                headers=auth_headers,
            )

        print("✅ Debug workflow completed\n")
        self.debug_wait("Workflow complete, waiting")

# Interactive debugging session
def interactive_debug():
    """Menu-driven debugging session; loops until the user picks Exit."""
    # fix: print_task_ratio is used below but this example never imported it;
    # a function-scope import keeps the helper self-contained.
    from locust.user.inspectuser import print_task_ratio

    print("=== Interactive Debugging Session ===")

    while True:
        print("\nOptions:")
        print("1. Run single user")
        print("2. Analyze task ratios")
        print("3. Run with detailed logging")
        print("4. Exit")

        choice = input("Select option (1-4): ").strip()

        if choice == "1":
            print("Running single user...")
            run_single_user(DebugHTTPUser, include_time=True, loglevel="INFO")

        elif choice == "2":
            print("Analyzing task ratios...")
            print_task_ratio([DebugHTTPUser], num_users=10, total=True)

        elif choice == "3":
            print("Running with detailed logging...")
            run_single_user(DebugHTTPUser,
                           include_time=True,
                           include_length=True,
                           include_context=True,
                           include_payload=True,
                           loglevel="DEBUG")

        elif choice == "4":
            print("Exiting debug session")
            break

        else:
            print("Invalid option")

if __name__ == "__main__":
    interactive_debug()

Types

from typing import Dict, List, Any, Type, Optional
from locust import User

# Debugging function types
UserClass = Type[User]
UserClassList = List[UserClass]
LogLevel = str  # "DEBUG", "INFO", "WARNING", "ERROR"

# Task ratio analysis types
TaskRatioData = Dict[str, Any]
TaskRatioResult = Dict[str, TaskRatioData]
TaskRationResult = TaskRatioResult  # deprecated alias kept for backward compatibility (original typo)

# Debug function signatures
def run_single_user(
    user_class: UserClass,
    include_length: bool = False,
    include_time: bool = False,
    include_context: bool = False,
    include_payload: bool = False,
    loglevel: LogLevel = "WARNING"
) -> None: ...

# fix: these signatures previously omitted the num_users / user_spawned / total
# parameters documented in the capability section above.
def print_task_ratio(user_classes: UserClassList, num_users: int, total: bool) -> None: ...
def print_task_ratio_json(user_classes: UserClassList, num_users: int) -> None: ...
def get_ratio(user_classes: UserClassList, user_spawned: Dict[str, int], total: bool) -> TaskRatioResult: ...

Install with Tessl CLI

npx tessl i tessl/pypi-locust

docs

contrib.md

debugging.md

events.md

exceptions.md

index.md

load-shapes.md

tasksets.md

user-classes.md

wait-time.md

tile.json