
tessl/pypi-pyspark

Python API for Apache Spark, providing distributed computing, data analysis, and machine learning capabilities


docs/pandas-api.md

Pandas API on Spark

A pandas-compatible API for running familiar pandas operations on distributed datasets, so existing pandas workflows scale to large data without changing the interface.
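
A minimal sketch of the entry point (the column names and values here are made up for illustration):

import pyspark.pandas as ps

# Create a pandas-on-Spark DataFrame exactly as with pandas.
df = ps.DataFrame({"city": ["Oslo", "Lima", "Pune"], "pop": [0.7, 10.9, 3.1]})

df.head(2)        # first rows, computed on Spark
df["pop"].mean()  # distributed aggregation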

Capabilities

DataFrame Operations

Core DataFrame functionality with pandas-compatible interface.

class DataFrame:
    """Pandas-compatible DataFrame on Spark."""
    
    def head(self, n=5):
        """
        Return first n rows.
        
        Parameters:
        - n (int): Number of rows
        
        Returns:
        DataFrame with first n rows
        """
    
    def tail(self, n=5):
        """
        Return last n rows.
        
        Parameters:
        - n (int): Number of rows
        
        Returns:
        DataFrame with last n rows
        """
    
    def describe(self, percentiles=None, include=None, exclude=None):
        """
        Generate descriptive statistics.
        
        Parameters:
        - percentiles (list): Percentiles to include
        - include: Data types to include
        - exclude: Data types to exclude
        
        Returns:
        DataFrame with statistics
        """
    
    def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, show_counts=None):
        """Print DataFrame info."""
    
    def count(self):
        """Count non-null values."""
    
    def sum(self, axis=None, skipna=True, level=None, numeric_only=None, min_count=0):
        """Sum values."""
    
    def mean(self, axis=None, skipna=True, level=None, numeric_only=None):
        """Calculate mean."""
    
    def median(self, axis=None, skipna=True, level=None, numeric_only=None):
        """Calculate median."""
    
    def std(self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None):
        """Calculate standard deviation."""
    
    def var(self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None):
        """Calculate variance."""
    
    def min(self, axis=None, skipna=True, level=None, numeric_only=None):
        """Return minimum values."""
    
    def max(self, axis=None, skipna=True, level=None, numeric_only=None):
        """Return maximum values."""
    
    def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False, observed=False, dropna=True):
        """
        Group DataFrame by columns.
        
        Parameters:
        - by: Columns to group by
        - axis (int): Axis to group by
        - level: Level for MultiIndex
        - as_index (bool): Whether to use group keys as index
        - sort (bool): Sort group keys
        - group_keys (bool): Add group keys to index
        - squeeze (bool): Reduce dimensionality
        - observed (bool): Only show observed values for categorical
        - dropna (bool): Drop NA values from groups
        
        Returns:
        GroupBy object
        """
    
    def merge(self, right, how='inner', on=None, left_on=None, right_on=None, 
              left_index=False, right_index=False, sort=False, suffixes=('_x', '_y')):
        """
        Merge DataFrames.
        
        Parameters:
        - right (DataFrame): DataFrame to merge with
        - how (str): Type of merge ('left', 'right', 'outer', 'inner')
        - on: Column names to join on
        - left_on: Left DataFrame column names
        - right_on: Right DataFrame column names
        - left_index (bool): Use left index as join key
        - right_index (bool): Use right index as join key
        - sort (bool): Sort join keys
        - suffixes (tuple): Suffixes for overlapping column names
        
        Returns:
        Merged DataFrame
        """
    
    def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False):
        """Join DataFrames."""
    
    def drop(self, labels=None, axis=0, index=None, columns=None, level=None, 
             inplace=False, errors='raise'):
        """Drop specified labels."""
    
    def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
        """Remove missing values."""
    
    def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None):
        """Fill missing values."""
    
    def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'):
        """Sort by values."""
    
    def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True):
        """Sort by index."""

class Series:
    """Pandas-compatible Series on Spark."""
    
    def head(self, n=5):
        """Return first n elements."""
    
    def tail(self, n=5):
        """Return last n elements."""
    
    def describe(self, percentiles=None, include=None, exclude=None):
        """Generate descriptive statistics."""
    
    def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
        """Count unique values."""
    
    def unique(self):
        """Return unique values."""
    
    def nunique(self, dropna=True):
        """Count unique values."""
    
    def drop_duplicates(self, keep='first', inplace=False):
        """Remove duplicate values."""

Data I/O Functions

Pandas-compatible functions for reading data into DataFrames.

def read_csv(path, sep=',', header='infer', names=None, index_col=None, 
             usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True,
             dtype=None, engine=None, converters=None, true_values=None, 
             false_values=None, skipinitialspace=False, skiprows=None, 
             skipfooter=0, nrows=None, na_values=None, keep_default_na=True,
             na_filter=True, verbose=False, skip_blank_lines=True, 
             parse_dates=False, infer_datetime_format=False, keep_date_col=False,
             date_parser=None, dayfirst=False, cache_dates=True, iterator=False,
             chunksize=None, compression='infer', thousands=None, decimal='.',
             lineterminator=None, quotechar='"', quoting=0, doublequote=True,
             escapechar=None, comment=None, encoding=None, dialect=None,
             error_bad_lines=True, warn_bad_lines=True, delim_whitespace=False,
             low_memory=True, memory_map=False, float_precision=None):
    """
    Read CSV file into DataFrame.
    
    Parameters:
    - path (str): File path
    - sep (str): Column separator
    - header: Row to use as column names
    - names (list): Column names
    - index_col: Column to use as row labels
    - usecols: Columns to read
    - dtype: Data type specification
    - parse_dates: Parse date columns
    - na_values: Additional strings to recognize as NA
    
    Returns:
    DataFrame
    """

def read_parquet(path, engine='auto', columns=None, **kwargs):
    """
    Read Parquet file into DataFrame.
    
    Parameters:
    - path (str): File path
    - engine (str): Parquet library to use
    - columns (list): Columns to read
    
    Returns:
    DataFrame
    """

def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, 
              convert_axes=None, convert_dates=True, keep_default_dates=True,
              numpy=False, precise_float=False, date_unit=None, encoding=None,
              lines=False, chunksize=None, compression='infer'):
    """Read JSON file into DataFrame."""

def read_excel(io, sheet_name=0, header=0, names=None, index_col=None,
               usecols=None, squeeze=False, dtype=None, engine=None,
               converters=None, true_values=None, false_values=None,
               skiprows=None, nrows=None, na_values=None, keep_default_na=True,
               na_filter=True, verbose=False, parse_dates=False,
               date_parser=None, thousands=None, comment=None, skipfooter=0,
               convert_float=True, mangle_dupe_cols=True):
    """Read Excel file into DataFrame."""

Utility Functions

def concat(objs, axis=0, join='outer', ignore_index=False, keys=None,
           levels=None, names=None, verify_integrity=False, sort=False, copy=True):
    """
    Concatenate pandas objects.
    
    Parameters:
    - objs: Objects to concatenate
    - axis (int): Axis to concatenate along
    - join (str): How to handle indexes ('inner' or 'outer')
    - ignore_index (bool): Ignore index values
    - keys: Construct hierarchical index
    - sort (bool): Sort non-concatenation axis
    
    Returns:
    Concatenated object
    """

def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value',
         col_level=None, ignore_index=True):
    """
    Unpivot DataFrame from wide to long format.
    
    Parameters:
    - frame (DataFrame): DataFrame to melt
    - id_vars: Columns to use as identifier variables
    - value_vars: Columns to unpivot
    - var_name (str): Name for variable column
    - value_name (str): Name for value column
    
    Returns:
    Melted DataFrame
    """

def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
                columns=None, sparse=False, drop_first=False, dtype=None):
    """
    Convert categorical variables to dummy/indicator variables.
    
    Parameters:
    - data: Input data
    - prefix: String to append to column names
    - prefix_sep (str): Separator between prefix and category
    - dummy_na (bool): Include column for NAs
    - columns: Columns to encode
    - drop_first (bool): Drop first category to avoid collinearity
    - dtype: Data type for new columns
    
    Returns:
    DataFrame with dummy variables
    """

def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
                utc=None, format=None, exact=True, unit=None,
                infer_datetime_format=False, origin='unix', cache=True):
    """
    Convert argument to datetime.
    
    Parameters:
    - arg: Object to convert
    - errors (str): Error handling ('raise', 'coerce', 'ignore')
    - format (str): strftime format
    - unit (str): Unit of numeric values
    
    Returns:
    Datetime-converted object (Timestamp, DatetimeIndex, or Series, depending on the input)
    """

def date_range(start=None, end=None, periods=None, freq=None, tz=None,
               normalize=False, name=None, closed=None, **kwargs):
    """
    Generate range of dates.
    
    Parameters:
    - start: Start date
    - end: End date  
    - periods (int): Number of periods
    - freq (str): Frequency string
    - tz: Time zone
    - normalize (bool): Normalize to midnight
    - name (str): Name for index
    
    Returns:
    DatetimeIndex
    """

def from_pandas(pdf):
    """
    Create PySpark DataFrame from pandas DataFrame.
    
    Parameters:
    - pdf (pandas.DataFrame): pandas DataFrame
    
    Returns:
    pyspark.pandas.DataFrame
    """

def sql(query, **kwargs):
    """
    Execute a SQL query against pandas-on-Spark objects.
    
    Parameters:
    - query (str): SQL query string
    
    Returns:
    Query result as DataFrame
    """

Configuration

def get_option(pat):
    """
    Get configuration option.
    
    Parameters:
    - pat (str): Option pattern
    
    Returns:
    Option value
    """

def set_option(pat, value):
    """
    Set configuration option.
    
    Parameters:
    - pat (str): Option pattern
    - value: Option value
    """

def reset_option(pat):
    """
    Reset configuration option to default.
    
    Parameters:
    - pat (str): Option pattern
    """

def option_context(*args):
    """
    Context manager for temporarily setting options.
    
    Parameters:
    - args: Option-value pairs
    
    Returns:
    Context manager
    """

class options:
    """Options configuration object."""
    pass
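
A configuration sketch using existing pandas-on-Spark option names (display.max_rows and compute.ops_on_diff_frames):

import pyspark.pandas as ps

print(ps.get_option("display.max_rows"))

# Allow operations that combine two differently-indexed frames.
ps.set_option("compute.ops_on_diff_frames", True)
ps.reset_option("compute.ops_on_diff_frames")

# Override an option only inside the block; it is restored on exit.
with ps.option_context("display.max_rows", 10):
    print(ps.get_option("display.max_rows"))

# Attribute-style access through the options object.
ps.options.display.max_rows = 1000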

Types

class Index:
    """Index for pandas objects."""
    
    def to_pandas(self):
        """Convert to pandas Index."""

class MultiIndex(Index):
    """Multi-level index."""
    
    @classmethod
    def from_tuples(cls, tuples, sortorder=None, names=None):
        """Create MultiIndex from tuples."""
    
    @classmethod
    def from_arrays(cls, arrays, sortorder=None, names=None):
        """Create MultiIndex from arrays."""

class DatetimeIndex(Index):
    """Index for datetime data."""
    
    def strftime(self, date_format):
        """Format datetime as strings."""

class CategoricalIndex(Index):
    """Index for categorical data."""
    
    @property
    def categories(self):
        """Categories of the index."""

class NamedAgg:
    """Named aggregation for groupby operations."""
    
    def __init__(self, column, aggfunc):
        """
        Create named aggregation.
        
        Parameters:
        - column (str): Column name
        - aggfunc: Aggregation function
        """

Install with Tessl CLI

npx tessl i tessl/pypi-pyspark
