CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-peft

State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods for efficiently adapting large pretrained models

Pending
Overview
Eval results
Files

docs/utilities.md

Utilities and State Management

Essential utility functions for managing PEFT model state, loading/saving adapters, preparing models for training, and handling various integration scenarios. These functions provide the foundational operations for PEFT workflows.

Capabilities

State Dictionary Management

Functions for extracting, setting, and managing PEFT model state dictionaries.

def get_peft_model_state_dict(
    model,
    state_dict: Optional[dict] = None,
    adapter_name: str = "default"
) -> dict:
    """Extract the PEFT (adapter) parameters of *model* as a state dict.

    Parameters
    ----------
    model : PEFT model whose adapter weights should be collected.
    state_dict : optional pre-computed state dict to filter; when None,
        ``model.state_dict()`` is used as the source.
    adapter_name : name of the adapter whose parameters are extracted.

    Returns
    -------
    dict containing only the PEFT parameters.
    """

def set_peft_model_state_dict(
    model,
    peft_model_state_dict: dict,
    adapter_name: str = "default"
):
    """Load adapter weights from *peft_model_state_dict* into *model*.

    Parameters
    ----------
    model : PEFT model that receives the parameters.
    peft_model_state_dict : state dict holding the PEFT parameters to set.
    adapter_name : name of the adapter the weights belong to.
    """

def load_peft_weights(model_id: str, device: Optional[str] = None) -> dict:
    """Fetch saved PEFT weights from a model identifier or local path.

    Parameters
    ----------
    model_id : Hub identifier or local filesystem path of the adapter.
    device : device the weights are loaded onto; None keeps the default.

    Returns
    -------
    dict of the loaded PEFT weights.
    """

Model Preparation and Training Utilities

Functions for preparing models for efficient training, especially with quantization.

def prepare_model_for_kbit_training(
    model,
    use_gradient_checkpointing: bool = True,
    gradient_checkpointing_kwargs: Optional[dict] = None
):
    """Make a k-bit (quantized) model trainable.

    Enables gradient computation for the input embeddings so adapters can
    be fine-tuned on top of the quantized base model.

    Parameters
    ----------
    model : model to prepare for training.
    use_gradient_checkpointing : enable gradient checkpointing to trade
        compute for memory.
    gradient_checkpointing_kwargs : extra keyword arguments forwarded to
        gradient checkpointing.

    Returns
    -------
    The prepared model, ready for k-bit training.
    """

def cast_mixed_precision_params(
    model,
    dtype: torch.dtype = torch.float16
):
    """Cast the model's mixed-precision parameters to *dtype*.

    Parameters
    ----------
    model : model whose parameters are cast.
    dtype : target floating-point dtype (default ``torch.float16``).
    """

Configuration and Mapping Utilities

Functions for working with PEFT configurations and model mappings.

def get_peft_config(config_dict: dict) -> PeftConfig:
    """Build the appropriate PeftConfig instance from a plain dictionary.

    Parameters
    ----------
    config_dict : configuration parameters, e.g. as parsed from an
        ``adapter_config.json`` file.

    Returns
    -------
    A PeftConfig instance matching the configuration.
    """

def inject_adapter_in_model(
    peft_config: PeftConfig,
    model,
    adapter_name: str = "default"
):
    """
    Inject adapter layers into a base model based on the PEFT configuration.
    
    Useful when the adapter layers are wanted without wrapping the model
    in a PeftModel.
    
    Args:
        peft_config: PEFT configuration describing the adapter to inject
        model: Base model to inject adapter into
        adapter_name: Name of the adapter
        
    Returns:
        The model with the adapter layers injected in place
    """

Preprocessing and Postprocessing

Utility functions for data preprocessing and model-specific postprocessing.

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """Shift *input_ids* one position to the right for seq2seq training.

    Used to build decoder inputs from labels: the first decoder position
    becomes ``decoder_start_token_id`` and padding uses ``pad_token_id``.

    Parameters
    ----------
    input_ids : token-id tensor to shift.
    pad_token_id : id used for padding positions.
    decoder_start_token_id : id placed at the first decoder position.

    Returns
    -------
    The right-shifted token-id tensor.
    """

def bloom_model_postprocess_past_key_value(past_key_values, batch_size: int, seq_len: int):
    """Postprocess cached key/value tensors for BLOOM models.

    Parameters
    ----------
    past_key_values : past key/value tensors to postprocess.
    batch_size : batch dimension of the cache.
    seq_len : sequence length of the cache.

    Returns
    -------
    The postprocessed past key/value tensors.
    """

Integration Utilities

Functions for integrating with various frameworks and handling device management.

def map_cache_to_layer_device_map(
    cache, 
    layer_device_map: dict,
    offload_dir: Optional[str] = None
):
    """Map cache tensors onto per-layer devices for distributed inference.

    Parameters
    ----------
    cache : cache object whose tensors are mapped.
    layer_device_map : mapping from layers to target devices.
    offload_dir : optional directory used for offloading tensors.

    Returns
    -------
    The mapped cache object.
    """

Target Module Mappings

Predefined mappings of model architectures to commonly used target modules for different PEFT methods.

# LoRA target modules for different model architectures.
# Keys are Hugging Face `model.config.model_type` strings (see the
# "Using Target Module Mappings" example below); values are the module
# names LoRA targets by default for that architecture.
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: dict = {
    "t5": ["q", "v"],
    "mt5": ["q", "v"], 
    "bart": ["q_proj", "v_proj"],
    "gpt2": ["c_attn"],
    "bloom": ["query_key_value"],
    "blip-2": ["q", "v", "q_proj", "v_proj"],
    "opt": ["q_proj", "v_proj"],
    "gptj": ["q_proj", "v_proj"],
    "gpt_neox": ["query_key_value"],
    "gpt_neo": ["q_proj", "v_proj"],
    "bert": ["query", "value"],
    "roberta": ["query", "value"],
    "xlm-roberta": ["query", "value"],
    "electra": ["query", "value"],
    "deberta-v2": ["query_proj", "value_proj"],
    "deberta": ["in_proj"],
    "layoutlm": ["query", "value"],
    "llama": ["q_proj", "v_proj"],
    "chatglm": ["query_key_value"],
    "gpt_bigcode": ["c_attn"],
    "mpt": ["Wqkv"],
    "RefinedWebModel": ["query_key_value"],
    "RefinedWeb": ["query_key_value"],
    "falcon": ["query_key_value"],
    "btlm": ["c_proj", "c_attn"],
    "codegen": ["qkv_proj"],
    "mistral": ["q_proj", "v_proj"],
    "mixtral": ["q_proj", "v_proj"],
    "stablelm": ["q_proj", "v_proj"],
    "phi": ["q_proj", "v_proj", "fc1", "fc2"],
    "gemma": ["q_proj", "v_proj"],
}

# AdaLoRA target modules — same key convention as the LoRA mapping above,
# covering a smaller set of architectures.
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: dict = {
    "t5": ["q", "v"],
    "mt5": ["q", "v"],
    "bart": ["q_proj", "v_proj"],
    "gpt2": ["c_attn"],
    "bloom": ["query_key_value"],
    "opt": ["q_proj", "v_proj"],
    "gptj": ["q_proj", "v_proj"],
    "gpt_neox": ["query_key_value"],
    "gpt_neo": ["q_proj", "v_proj"],
    "llama": ["q_proj", "v_proj"],
    "bert": ["query", "value"],
    "roberta": ["query", "value"],
}

# IA3 target modules: per architecture, attention modules plus a
# feedforward module (the feedforward subset is listed separately below).
TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING: dict = {
    "t5": ["k", "v", "wo"],
    "mt5": ["k", "v", "wo"],
    "gpt2": ["c_attn", "mlp.c_proj"],
    "bloom": ["query_key_value", "mlp.dense_4h_to_h"],
    "opt": ["k_proj", "v_proj", "fc2"],
    "gptj": ["k_proj", "v_proj", "fc_out"],
    "gpt_neox": ["query_key_value", "dense_4h_to_h"],
    "gpt_neo": ["k_proj", "v_proj", "c_proj"],
    "bart": ["k_proj", "v_proj", "fc2"],
    "llama": ["k_proj", "v_proj", "down_proj"],
}

# Subset of the IA3 targets that are feedforward modules. Each entry here
# also appears in the IA3 target mapping above for the same architecture.
TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING: dict = {
    "t5": ["wo"],
    "mt5": ["wo"],
    "gpt2": ["mlp.c_proj"],
    "bloom": ["mlp.dense_4h_to_h"],
    "opt": ["fc2"],
    "gptj": ["fc_out"],
    "gpt_neox": ["dense_4h_to_h"],
    "gpt_neo": ["c_proj"],
    "bart": ["fc2"],
    "llama": ["down_proj"],
}

Constants and Configuration Names

Important constants used throughout the PEFT library.

# File names used when saving and loading adapters.
CONFIG_NAME: str = "adapter_config.json"
WEIGHTS_NAME: str = "adapter_model.bin"
SAFETENSORS_WEIGHTS_NAME: str = "adapter_model.safetensors"

# Shorthand values accepted for `target_modules` to select linear layers.
# NOTE(review): upstream peft defines INCLUDE_LINEAR_LAYERS_SHORTHAND as the
# single string "all-linear", not a list — verify this value against the
# installed peft version before relying on it.
INCLUDE_LINEAR_LAYERS_SHORTHAND: List[str] = ["linear", "Linear"]

Usage Examples

Saving and Loading PEFT State

from peft import get_peft_model_state_dict, set_peft_model_state_dict
import torch

# Extract only the PEFT (adapter) parameters from the model
peft_state_dict = get_peft_model_state_dict(peft_model)

# Save to file
torch.save(peft_state_dict, "peft_weights.pt")

# Load and set state dictionary.
# weights_only=True restricts unpickling to plain tensors/containers,
# avoiding arbitrary code execution from an untrusted checkpoint file.
loaded_state_dict = torch.load("peft_weights.pt", weights_only=True)
set_peft_model_state_dict(peft_model, loaded_state_dict)

Preparing Model for Quantized Training

# torch is required below for bnb_4bit_compute_dtype
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import prepare_model_for_kbit_training, LoraConfig, get_peft_model

# Load quantized model (4-bit NF4 with double quantization)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/DialoGPT-medium",
    quantization_config=bnb_config,
    device_map="auto"
)

# Prepare for k-bit training (enables input-embedding gradients and
# gradient checkpointing)
model = prepare_model_for_kbit_training(
    model,
    use_gradient_checkpointing=True
)

# Add PEFT adapter
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["c_attn", "c_proj"],
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM"
)

peft_model = get_peft_model(model, peft_config)

Working with Mixed Precision

# torch is required below for torch.float16
import torch

from peft import cast_mixed_precision_params

# Cast parameters to half precision
cast_mixed_precision_params(peft_model, torch.float16)

# Training loop with automatic mixed precision.
# NOTE(review): torch.cuda.amp is deprecated in recent PyTorch releases in
# favor of torch.amp (autocast("cuda"), GradScaler("cuda")) — switch once
# the minimum supported torch version allows it.
from torch.cuda.amp import autocast, GradScaler

scaler = GradScaler()

for batch in dataloader:
    optimizer.zero_grad()

    with autocast():
        outputs = peft_model(**batch)
        loss = outputs.loss

    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()

Using Target Module Mappings

from peft import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, LoraConfig

# Look up the recommended target modules for this architecture; fall back
# to a manual specification when the architecture is not in the mapping.
model_type = model.config.model_type
recommended = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING.get(model_type)

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=recommended if recommended else ["q_proj", "v_proj"],
    task_type="CAUSAL_LM"
)

Handling Sequence-to-Sequence Tasks

from peft import shift_tokens_right

# Prepare decoder input ids for seq2seq training (teacher forcing)
def prepare_decoder_input_ids_from_labels(labels, pad_token_id, decoder_start_token_id):
    return shift_tokens_right(labels, pad_token_id, decoder_start_token_id)

# Example usage in training
labels = tokenizer("Target text", return_tensors="pt").input_ids
# NOTE(review): eos_token_id is passed as the decoder start token below.
# That is correct for BART-style models, but many seq2seq models (e.g. T5)
# use a dedicated decoder_start_token_id — confirm against the model config.
decoder_input_ids = prepare_decoder_input_ids_from_labels(
    labels, 
    tokenizer.pad_token_id, 
    tokenizer.eos_token_id
)

outputs = peft_model(
    input_ids=input_ids,
    decoder_input_ids=decoder_input_ids,
    labels=labels
)
loss = outputs.loss

Loading Weights from Hub or Local Path

from peft import load_peft_weights

# Load from the Hugging Face Hub (repo id "username/adapter-name")
weights = load_peft_weights("username/my-peft-adapter")

# Load from a local directory containing the saved adapter files
weights = load_peft_weights("./local/peft/adapter")

# Load the weights directly onto a specific device
weights = load_peft_weights("username/my-peft-adapter", device="cuda:0")

Install with Tessl CLI

npx tessl i tessl/pypi-peft

docs

advanced-methods.md

auto-classes.md

core-models.md

index.md

lora-methods.md

prompt-learning.md

utilities.md

tile.json