CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-webuiapi

Python API client for AUTOMATIC1111/stable-diffusion-webui enabling programmatic Stable Diffusion image generation

Overview
Eval results
Files

docs/interfaces.md

Extension Interfaces

Specialized interfaces for extension functionality, image analysis, and advanced processing operations. These classes provide structured access to extension-specific APIs and specialized processing tools.

Capabilities

Model Keywords Interface

Interface for retrieving model-specific keywords and metadata.

class ModelKeywordInterface:
    """Interface for model keyword generation and lookup.

    Thin client wrapper: holds a WebUIApi instance and uses it to query
    model keywords from a running AUTOMATIC1111 stable-diffusion-webui
    server. NOTE(review): presumably requires the model-keyword extension
    to be installed server-side — verify.
    """
    
    def __init__(self, webuiapi: WebUIApi):
        """
        Initialize the interface with a WebUIApi instance.
        
        Parameters:
        - webuiapi: WebUIApi client instance used for all HTTP calls
        """
    
    def get_keywords(self, model_name: str) -> ModelKeywordResult:
        """
        Get keywords associated with a specific model.

        Parameters:
        - model_name: Name of the model to query

        Returns:
        ModelKeywordResult containing keywords and metadata
        (keywords, model, oldhash, match_source fields)
        """

class ModelKeywordResult:
    """Result container for model keyword operations.

    Plain data holder; all fields start out empty.
    """
    
    def __init__(self):
        self.keywords: List[str] = []  # Keywords associated with the queried model
        self.model: str = ""  # Name of the model that was queried
        self.oldhash: str = ""  # Model hash (presumably the legacy short-hash format — verify)
        self.match_source: str = ""  # Source of keyword match (which lookup produced it — presumably)

Image Processing Interfaces

Specialized interfaces for advanced image processing operations.

class InstructPix2PixInterface:
    """Interface for instruction-based image editing using InstructPix2Pix.

    Wraps the InstructPix2Pix img2img endpoint of a running
    stable-diffusion-webui server via a WebUIApi client.
    """
    
    def __init__(self, webuiapi: WebUIApi):
        """
        Initialize the interface with a WebUIApi instance.
        
        Parameters:
        - webuiapi: WebUIApi client instance used for all HTTP calls
        """
    
    def img2img(
        self,
        prompt: str,
        images: List[Image.Image],
        negative_prompt: str = "",
        **kwargs
    ) -> WebUIApiResult:
        """
        Edit images using natural language instructions.

        Parameters:
        - prompt: Natural language editing instruction
          (e.g. "make the sky more dramatic")
        - images: List of PIL input images to edit
        - negative_prompt: What to avoid in editing
        - **kwargs: Additional img2img parameters forwarded to the API
          (supported keys depend on the server — verify)

        Returns:
        WebUIApiResult containing edited images
        """

class RemBGInterface:
    """Interface for background removal operations.

    Wraps the rembg extension endpoint of a running
    stable-diffusion-webui server via a WebUIApi client.
    """
    
    def __init__(self, webuiapi: WebUIApi):
        """
        Initialize the interface with a WebUIApi instance.
        
        Parameters:
        - webuiapi: WebUIApi client instance used for all HTTP calls
        """
    
    def rembg(
        self,
        input_image: str,
        model: str = "u2net",
        return_mask: bool = False,
        alpha_matting: bool = False,
        # NOTE(review): 270 exceeds the usual 0-255 pixel threshold range;
        # the upstream rembg library documents 240 as its default — confirm
        # against the installed extension before relying on this value.
        alpha_matting_foreground_threshold: int = 270,
        alpha_matting_background_threshold: int = 10,
        alpha_matting_erode_size: int = 10
    ) -> Dict:
        """
        Remove background from images.

        Parameters:
        - input_image: Base64-encoded input image
        - model: Background removal model ("u2net", "u2netp", "silueta", etc.)
        - return_mask: Return the segmentation mask
        - alpha_matting: Apply alpha matting for better edges
        - alpha_matting_foreground_threshold: Foreground threshold for alpha matting
        - alpha_matting_background_threshold: Background threshold for alpha matting  
        - alpha_matting_erode_size: Erosion size for alpha matting

        Returns:
        Dictionary containing processed image and optional mask
        """

Segment Anything Interface

Comprehensive interface for Segment Anything Model (SAM) operations including detection, segmentation, and mask processing.

class SegmentAnythingInterface:
    """Interface for Segment Anything Model operations.

    Wraps the segment-anything extension endpoints (SAM prediction,
    GroundingDINO detection, mask dilation, semantic segmentation) of a
    running stable-diffusion-webui server via a WebUIApi client.
    """
    
    def __init__(self, webuiapi: WebUIApi):
        """
        Initialize the interface with a WebUIApi instance.
        
        Parameters:
        - webuiapi: WebUIApi client instance used for all HTTP calls
        """
    
    def heartbeat(self) -> Dict:
        """
        Check if SegmentAnything extension is responsive.

        Returns:
        Status dictionary indicating extension health
        """
    
    def get_sam_models(self) -> List[str]:
        """
        Get list of available SAM models.

        Returns:
        List of SAM model names (e.g. "sam_vit_h_4b8939.pth")
        """
    
    def sam_predict(
        self,
        sam_model_name: str,
        input_image: str,
        sam_positive_points: List[List[int]] = None,
        sam_negative_points: List[List[int]] = None,
        sam_bbox: List[int] = None,
        **kwargs
    ) -> SegmentAnythingSamResult:
        """
        Perform SAM prediction with point or bounding box prompts.

        Parameters:
        - sam_model_name: SAM model to use for prediction
        - input_image: Base64-encoded input image
        - sam_positive_points: List of [x, y] positive prompt points
          (None means no positive points)
        - sam_negative_points: List of [x, y] negative prompt points
          (None means no negative points)
        - sam_bbox: Bounding box as [x1, y1, x2, y2] (None means no box)
        - **kwargs: Additional parameters forwarded to the extension API

        NOTE(review): at least one prompt (points or bbox) is presumably
        required for a meaningful prediction — verify against the
        segment-anything extension API.

        Returns:
        SegmentAnythingSamResult containing segmentation masks
        """
    
    def dino_predict(
        self,
        dino_model_name: str,
        input_image: str,
        dino_text_prompt: str,
        dino_box_threshold: float = 0.3,
        dino_preview_checkbox: bool = False,
        **kwargs
    ) -> SegmentAnythingGinoResult:
        """
        Perform DINO prediction for object detection.

        Parameters:
        - dino_model_name: DINO model to use
        - input_image: Base64-encoded input image
        - dino_text_prompt: Text description of objects to detect
          (comma-separated labels, e.g. "person, face")
        - dino_box_threshold: Detection confidence threshold
          (presumably in [0, 1] — verify)
        - dino_preview_checkbox: Generate preview visualization
        - **kwargs: Additional parameters forwarded to the extension API

        Returns:
        SegmentAnythingGinoResult containing detection results
        """
    
    def dilate_mask(
        self,
        input_image: str,
        mask: str,
        dilate_amount: int = 5,
        **kwargs
    ) -> SegmentAnythingDilationResult:
        """
        Dilate/erode segmentation masks.

        Parameters:
        - input_image: Base64-encoded input image
        - mask: Base64-encoded mask image
        - dilate_amount: Dilation amount (positive=dilate, negative=erode)
        - **kwargs: Additional parameters forwarded to the extension API

        Returns:
        SegmentAnythingDilationResult containing processed mask
        """
    
    def generate_semantic_segmentation(
        self,
        input_image: str,
        category_names: List[str],
        **kwargs
    ) -> Dict:
        """
        Generate semantic segmentation with category labels.

        Parameters:
        - input_image: Base64-encoded input image
        - category_names: List of category names to segment
        - **kwargs: Additional parameters forwarded to the extension API

        Returns:
        Dictionary containing semantic segmentation results
        """
    
    def sam_and_semantic_seg_with_cat_id(
        self,
        input_image: str,
        category_id: int,
        **kwargs
    ) -> SegmentAnythingSemanticSegWithCatIdResult:
        """
        Combine SAM with semantic segmentation using category ID.

        Parameters:
        - input_image: Base64-encoded input image
        - category_id: Category ID for segmentation
          (category numbering scheme not specified here — verify)
        - **kwargs: Additional parameters forwarded to the extension API

        Returns:
        SegmentAnythingSemanticSegWithCatIdResult containing combined results
        """

Image Tagging Interface

Interface for automatic image tagging and interrogation using various models.

class TaggerInterface:
    """Interface for automatic image tagging and analysis.

    Wraps the tagger extension endpoints of a running
    stable-diffusion-webui server via a WebUIApi client.
    """
    
    def __init__(self, webuiapi: WebUIApi):
        """
        Initialize the interface with a WebUIApi instance.
        
        Parameters:
        - webuiapi: WebUIApi client instance used for all HTTP calls
        """
    
    def tagger_interrogate(
        self,
        image: str,
        model: str = "wd14-vit-v2-git",
        threshold: float = 0.35,
        additional_tags: str = "",
        exclude_tags: str = "",
        sort_by_alphabetical_order: bool = False,
        add_confident_as_weight: bool = False,
        replace_underscore: bool = False,
        replace_underscore_excludes: str = "",
        escape_tag: bool = False
    ) -> Dict:
        """
        Generate tags for images using trained tagging models.

        Parameters:
        - image: Base64-encoded input image
        - model: Tagging model to use ("wd14-vit-v2-git", "deepdanbooru", etc.)
        - threshold: Confidence threshold for tag inclusion
          (presumably in [0, 1] — verify)
        - additional_tags: Additional tags to include
        - exclude_tags: Tags to exclude from results
        - sort_by_alphabetical_order: Sort tags alphabetically
        - add_confident_as_weight: Add confidence as weight to tags
        - replace_underscore: Replace underscores in tags
        - replace_underscore_excludes: Tags to exclude from underscore replacement
        - escape_tag: Escape special characters in tags

        Returns:
        Dictionary containing generated tags and confidence scores
        (presumably a tag -> confidence mapping — verify)
        """
    
    def tagger_interrogators(self) -> List[str]:
        """
        Get list of available tagging models.

        Returns:
        List of available interrogator/tagger model names
        """

Usage Examples:

import webuiapi
from PIL import Image

# NOTE(review): this example assumes a running AUTOMATIC1111
# stable-diffusion-webui server with the API enabled and the relevant
# extensions installed (model-keyword, rembg, instruct-pix2pix,
# segment-anything, tagger), plus a local "portrait.jpg" file —
# the calls below will fail otherwise.
api = webuiapi.WebUIApi()

# Model keywords lookup
keyword_interface = webuiapi.ModelKeywordInterface(api)
result = keyword_interface.get_keywords("realistic_vision_v40")
print(f"Keywords: {result.keywords}")

# Background removal
rembg_interface = webuiapi.RemBGInterface(api)
image = Image.open("portrait.jpg")
# Encode the PIL image as base64, as required by the extension endpoints.
image_b64 = webuiapi.raw_b64_img(image)

bg_result = rembg_interface.rembg(
    input_image=image_b64,
    model="u2net",
    return_mask=True,
    alpha_matting=True
)

# Instruction-based editing
instruct_interface = webuiapi.InstructPix2PixInterface(api)
edit_result = instruct_interface.img2img(
    prompt="make the sky more dramatic",
    images=[image],
    negative_prompt="unrealistic, fake"
)

# Segment Anything operations
sam_interface = webuiapi.SegmentAnythingInterface(api)

# Check available models
sam_models = sam_interface.get_sam_models()
print(f"Available SAM models: {sam_models}")

# SAM prediction with point prompts
sam_result = sam_interface.sam_predict(
    sam_model_name="sam_vit_h_4b8939.pth",
    input_image=image_b64,
    sam_positive_points=[[250, 300], [280, 320]],  # Click points
    sam_negative_points=[[100, 100]]  # Avoid this area
)

# DINO object detection
dino_result = sam_interface.dino_predict(
    dino_model_name="GroundingDINO_SwinT_OGC",
    input_image=image_b64,
    dino_text_prompt="person, face",
    dino_box_threshold=0.3
)

# Image tagging
tagger_interface = webuiapi.TaggerInterface(api)

# Get available taggers
taggers = tagger_interface.tagger_interrogators()
print(f"Available taggers: {taggers}")

# Generate tags
tag_result = tagger_interface.tagger_interrogate(
    image=image_b64,
    model="wd14-vit-v2-git",
    threshold=0.35,
    sort_by_alphabetical_order=True,
    add_confident_as_weight=True
)

print(f"Generated tags: {tag_result}")

Types

class ModelKeywordResult:
    """Container for model keyword lookup results.

    Type summary; mirrors the fields assigned in the full class
    definition earlier on this page.
    """
    keywords: List[str]  # Associated keywords
    model: str  # Model name
    oldhash: str  # Model hash (presumably the legacy short-hash format — verify)
    match_source: str  # Source of keyword match

class SegmentAnythingSamResult:
    """Result container for SAM prediction operations."""
    # Contains segmentation masks and prediction metadata, as returned
    # by SegmentAnythingInterface.sam_predict; exact attributes are not
    # specified on this page.

class SegmentAnythingGinoResult:
    """Result container for DINO prediction operations."""
    # Contains object detection boxes and confidence scores, as returned
    # by SegmentAnythingInterface.dino_predict; exact attributes are not
    # specified on this page.

class SegmentAnythingDilationResult:
    """Result container for mask dilation operations."""
    # Contains processed masks after dilation/erosion, as returned by
    # SegmentAnythingInterface.dilate_mask; exact attributes are not
    # specified on this page.

class SegmentAnythingControlNetSegRandomResult:
    """Result container for ControlNet segmentation (random mode)."""
    # Contains segmentation results with random sampling
    # NOTE(review): no method documented on this page returns this type;
    # the producing API is presumably covered elsewhere (e.g.
    # controlnet.md or extensions.md) — verify.

class SegmentAnythingControlNetSegNotRandomResult:
    """Result container for ControlNet segmentation (non-random mode)."""
    # Contains deterministic segmentation results
    # NOTE(review): no method documented on this page returns this type;
    # the producing API is presumably covered elsewhere (e.g.
    # controlnet.md or extensions.md) — verify.

class SegmentAnythingSemanticSegWithCatIdResult:
    """Result container for semantic segmentation with category ID."""
    # Contains semantic segmentation with category labels, as returned by
    # SegmentAnythingInterface.sam_and_semantic_seg_with_cat_id; exact
    # attributes are not specified on this page.

Install with Tessl CLI

npx tessl i tessl/pypi-webuiapi

docs

configuration.md

controlnet.md

extensions.md

image-generation.md

image-processing.md

index.md

interfaces.md

model-management.md

tile.json