Financial functions for Python providing performance analysis, risk metrics, portfolio optimization, and data retrieval for quantitative finance
—
Data processing utilities including frequency conversion, merging, date calculations, and pandas extensions for financial time series manipulation. Essential tools for data preparation and transformation in quantitative analysis.
Utilities for combining multiple time series and handling data alignment.
def merge(*series):
"""
Merge multiple Series and/or DataFrames with proper alignment.
Parameters:
- *series: Variable number of pandas Series or DataFrames to merge
Returns:
pd.DataFrame: Merged data with aligned dates
"""
def drop_duplicate_cols(df):
"""
Remove duplicate columns keeping the one with longest history.
Parameters:
- df (pd.DataFrame): DataFrame potentially containing duplicate columns
Returns:
pd.DataFrame: DataFrame with duplicate columns removed
"""Convert time series between different frequencies while preserving financial data characteristics.
def to_monthly(series, method="ffill", how="end"):
"""
Convert time series to monthly frequency.
Parameters:
- series (pd.Series or pd.DataFrame): Input time series
- method (str): Resampling method ('ffill', 'mean', 'last', etc.)
- how (str): How to handle period boundaries ('end', 'start')
Returns:
pd.Series or pd.DataFrame: Monthly frequency data
"""
def asfreq_actual(series, freq, method="ffill", how="end", normalize=False):
"""
Resample to specified frequency keeping actual dates (not period ends).
Parameters:
- series (pd.Series): Input time series
- freq (str): Target frequency ('D', 'W', 'M', 'Y', etc.)
- method (str): Fill method for missing values
- how (str): Period boundary handling
- normalize (bool): Normalize timestamps to midnight
Returns:
pd.Series: Resampled series with actual dates
"""Financial date calculations and business day utilities.
def year_frac(start, end):
"""
Calculate year fraction between two dates using ACT/365 convention.
Parameters:
- start (datetime): Start date
- end (datetime): End date
Returns:
float: Year fraction (e.g., 0.25 for 3 months)
"""
def get_num_days_required(offset, period="d", perc_required=0.90, annualization_factor=252):
"""
Estimate number of days required for reliable statistics calculation.
Parameters:
- offset (str): Time offset (e.g., '1Y', '6M', '3M')
- period (str): Period type ('d' for daily, 'm' for monthly)
- perc_required (float): Required data percentage (default: 0.90)
- annualization_factor (int): Trading days per year (default: 252)
Returns:
int: Estimated required number of days
"""
def calc_mtd(daily_prices, monthly_prices):
"""
Calculate month-to-date returns from daily and monthly price series.
Parameters:
- daily_prices (pd.Series): Daily price series
- monthly_prices (pd.Series): Monthly price series
Returns:
pd.Series: Month-to-date returns
"""
def calc_ytd(daily_prices, yearly_prices):
"""
Calculate year-to-date returns from daily and yearly price series.
Parameters:
- daily_prices (pd.Series): Daily price series
- yearly_prices (pd.Series): Yearly price series
Returns:
pd.Series: Year-to-date returns
"""General data transformation and cleaning functions.
def annualize(returns, durations, one_year=365.0):
"""
Annualize returns using actual durations.
Parameters:
- returns (array-like): Return values
- durations (array-like): Duration of each return in days
- one_year (float): Days in one year (default: 365.0)
Returns:
array-like: Annualized returns
"""
def deannualize(returns, nperiods):
"""
Convert annualized returns to different frequency basis.
Parameters:
- returns (array-like): Annualized returns
- nperiods (int): Number of periods per year for target frequency
Returns:
array-like: Returns converted to target frequency
"""
def infer_freq(data):
"""
Infer most likely frequency from time series index.
Parameters:
- data (pd.Series or pd.DataFrame): Time series data
Returns:
str: Inferred frequency string (e.g., 'D', 'M', 'Y')
"""
def infer_nperiods(data, annualization_factor=None):
"""
Infer number of periods for annualization based on data frequency.
Parameters:
- data (pd.Series or pd.DataFrame): Time series data
- annualization_factor (int): Override annualization factor (default: None)
Returns:
int: Number of periods for annualization
"""Plotting functions for financial data visualization.
def plot_heatmap(data, title="Heatmap", show_legend=True, show_labels=True, label_fmt=".2f", vmin=None, vmax=None, figsize=None, label_color="w", cmap="RdBu", **kwargs):
"""
Plot data as a heatmap with customizable formatting.
Parameters:
- data (pd.DataFrame): Data to plot
- title (str): Plot title
- show_legend (bool): Whether to show color legend
- show_labels (bool): Whether to show value labels on cells
- label_fmt (str): Format string for value labels
- vmin, vmax (float): Color scale bounds
- figsize (tuple): Figure size
- label_color (str): Color for value labels
- cmap (str): Colormap name
- **kwargs: Additional matplotlib arguments
Returns:
matplotlib objects for further customization
"""
def plot_corr_heatmap(data, **kwargs):
"""
Plot correlation matrix as heatmap with sensible defaults.
Parameters:
- data (pd.DataFrame): Data for correlation calculation
- **kwargs: Arguments passed to plot_heatmap()
Returns:
matplotlib objects for further customization
"""Financial number formatting and display functions for reports and presentations.
def fmtp(number):
"""
Format number as percentage with 2 decimal places.
Parameters:
- number (float): Number to format (0.1523 becomes '15.23%')
Returns:
str: Formatted percentage string, returns '-' for NaN values
"""
def fmtpn(number):
"""
Format number as percentage without % symbol.
Parameters:
- number (float): Number to format (0.1523 becomes '15.23')
Returns:
str: Formatted percentage number, returns '-' for NaN values
"""
def fmtn(number):
"""
Format number as float with 2 decimal places.
Parameters:
- number (float): Number to format
Returns:
str: Formatted float string, returns '-' for NaN values
"""
def as_percent(item, digits=2):
"""
Format pandas Series/DataFrame values as percentages.
Parameters:
- item (pd.Series or pd.DataFrame): Data to format
- digits (int): Number of decimal places (default: 2)
Returns:
pd.Series or pd.DataFrame: Formatted with percentage strings
"""
def as_format(item, format_str=".2f"):
"""
Apply format string to pandas Series/DataFrame values.
Parameters:
- item (pd.Series or pd.DataFrame): Data to format
- format_str (str): Python format string (default: '.2f')
Returns:
pd.Series or pd.DataFrame: Formatted strings
"""Utilities for parsing arguments and cleaning ticker symbols.
def parse_arg(arg):
"""
Parse flexible argument input (string, list, tuple, CSV).
Parameters:
- arg (str, list, tuple): Input to parse
Can be 'AAPL,MSFT,GOOGL' or ['AAPL', 'MSFT'] or ('AAPL', 'MSFT')
Returns:
list: Parsed and cleaned list of arguments
"""
def clean_ticker(ticker):
"""
Clean ticker symbol for consistent usage.
Removes non-alphanumeric characters and converts to lowercase.
Handles cases like '^VIX' -> 'vix', 'SPX Index' -> 'spx'
Parameters:
- ticker (str): Raw ticker symbol
Returns:
str: Cleaned ticker symbol
"""
def clean_tickers(tickers):
"""
Apply clean_ticker to multiple tickers.
Parameters:
- tickers (list): List of ticker symbols
Returns:
list: List of cleaned ticker symbols
"""Advanced data transformation utilities.
def scale(val, src, dst):
"""
Scale value from source range to destination range with clipping.
Parameters:
- val (float): Value to scale
- src (tuple): Source range (min, max)
- dst (tuple): Destination range (min, max)
Returns:
float: Scaled value, clipped to destination bounds
"""
def get_freq_name(period):
"""
Get human-readable name for pandas frequency string.
Parameters:
- period (str): Pandas frequency code ('D', 'M', 'Y', etc.)
Returns:
str or None: Human-readable frequency name or None if unknown
"""Core function that enables method chaining on pandas objects.
def extend_pandas():
"""
Extend pandas Series and DataFrame with all ffn functions as methods.
This function is called automatically when ffn is imported, enabling
method chaining like: prices.to_returns().calc_sharpe()
Returns:
None (modifies pandas classes in-place)
"""import ffn
import pandas as pd
# Download data with different date ranges
spy_data = ffn.get('SPY', start='2020-01-01', end='2023-12-31')
vti_data = ffn.get('VTI', start='2020-06-01', end='2023-06-30') # Shorter range
bond_data = ffn.get('BND', start='2019-01-01', end='2023-12-31') # Longer range
# Merge with automatic alignment
merged_data = ffn.merge(spy_data, vti_data, bond_data)
print(f"Merged data shape: {merged_data.shape}")
print(f"Date range: {merged_data.index[0]} to {merged_data.index[-1]}")
# Handle duplicate columns scenario
duplicate_data = pd.concat([spy_data, spy_data.rename(columns={'SPY': 'SPY_dup'})], axis=1)
print(f"Before dedup: {duplicate_data.columns.tolist()}")
clean_data = ffn.drop_duplicate_cols(duplicate_data)
print(f"After dedup: {clean_data.columns.tolist()}")import ffn
# Download daily data
daily_prices = ffn.get('AAPL,MSFT', start='2020-01-01')
daily_returns = ffn.to_returns(daily_prices).dropna()
# Convert to monthly
monthly_prices = ffn.to_monthly(daily_prices, method="last")
monthly_returns = ffn.to_returns(monthly_prices).dropna()
print(f"Daily data points: {len(daily_prices)}")
print(f"Monthly data points: {len(monthly_prices)}")
# Custom frequency conversion
weekly_prices = ffn.asfreq_actual(daily_prices['AAPL'], freq='W', method='last')
print(f"Weekly data points: {len(weekly_prices)}")
# Performance comparison across frequencies
daily_sharpe = ffn.calc_sharpe(daily_returns['AAPL'], rf=0.02)
monthly_sharpe = ffn.calc_sharpe(monthly_returns['AAPL'], rf=0.02)
print(f"Daily Sharpe: {daily_sharpe:.3f}")
print(f"Monthly Sharpe: {monthly_sharpe:.3f}")import ffn
import pandas as pd
from datetime import datetime
# Calculate year fractions
start_date = datetime(2020, 1, 1)
end_date = datetime(2020, 12, 31)
year_fraction = ffn.year_frac(start_date, end_date)
print(f"Year fraction for 2020: {year_fraction:.4f}")
# Quarter calculation
q1_end = datetime(2020, 3, 31)
q1_fraction = ffn.year_frac(start_date, q1_end)
print(f"Q1 2020 year fraction: {q1_fraction:.4f}")
# Estimate required data for reliable statistics
days_1y = ffn.get_num_days_required('1Y', perc_required=0.95)
days_6m = ffn.get_num_days_required('6M', perc_required=0.90)
days_3m = ffn.get_num_days_required('3M', perc_required=0.85)
print(f"Days needed for 1Y analysis (95% req): {days_1y}")
print(f"Days needed for 6M analysis (90% req): {days_6m}")
print(f"Days needed for 3M analysis (85% req): {days_3m}")import ffn
import numpy as np
# Download data and calculate returns
prices = ffn.get('SPY', start='2020-01-01')['SPY']
returns = ffn.to_returns(prices).dropna()
# Calculate holding period durations
durations = np.ones(len(returns)) # Daily returns = 1 day each
# Annualize using actual durations
annualized_returns = ffn.annualize(returns, durations, one_year=365)
print(f"First 5 annualized daily returns: {annualized_returns[:5]}")
# Convert annualized returns to monthly basis
monthly_equivalent = ffn.deannualize(annualized_returns, nperiods=12)
print(f"Monthly equivalent returns (first 5): {monthly_equivalent[:5]}")
# Compare methodologies
annual_return_compound = (1 + returns).prod() ** (252 / len(returns)) - 1
annual_return_ffn = ffn.calc_cagr(prices)
print(f"Compound annual return: {annual_return_compound:.4f}")
print(f"FFN CAGR: {annual_return_ffn:.4f}")import ffn
import matplotlib.pyplot as plt
# Download multi-asset data
assets = ['SPY', 'QQQ', 'IWM', 'EFA', 'EEM']
prices = ffn.get(assets, start='2020-01-01')
returns = ffn.to_returns(prices).dropna()
# Correlation heatmap (plot_corr_heatmap computes the correlation internally,
# so pass the raw returns rather than a precomputed correlation matrix)
ffn.plot_corr_heatmap(returns, title='Asset Correlation Matrix',
                      figsize=(8, 6))
plt.show()
# Custom heatmap for return statistics
stats_data = pd.DataFrame({
'Mean': returns.mean() * 252, # Annualized
'Volatility': returns.std() * np.sqrt(252),
'Sharpe': [ffn.calc_sharpe(returns[col], rf=0.02) for col in returns.columns]
}).T
ffn.plot_heatmap(stats_data, title='Asset Statistics Heatmap',
label_fmt='.3f', cmap='viridis', figsize=(10, 4))
plt.show()
# Monthly return heatmap
monthly_returns = ffn.to_monthly(prices, method='last')
monthly_ret = ffn.to_returns(monthly_returns).dropna()
# Reshape for heatmap (years vs months)
monthly_pivot = monthly_ret['SPY'].groupby([monthly_ret.index.year, monthly_ret.index.month]).first().unstack()
monthly_pivot.columns = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
ffn.plot_heatmap(monthly_pivot, title='SPY Monthly Returns by Year',
label_fmt='.1%', cmap='RdYlGn', figsize=(12, 8))
plt.show()

import ffn
# FFN automatically extends pandas with all functions
prices = ffn.get('AAPL,MSFT', start='2020-01-01')
# Method chaining examples
portfolio_analysis = (prices
.rebase(100) # Rebase to 100
.to_returns() # Convert to returns
.dropna() # Clean data
.to_monthly(method='last') # Convert to monthly
.merge(ffn.get('SPY', start='2020-01-01').to_returns().dropna().to_monthly()) # Add benchmark
)
print(f"Chained analysis shape: {portfolio_analysis.shape}")
# Complex chaining for performance metrics
performance_metrics = {}
for asset in ['AAPL', 'MSFT']:
metrics = (prices[asset]
.to_returns()
.dropna()
.calc_perf_stats(rf=0.02)
)
performance_metrics[asset] = {
'CAGR': metrics.cagr,
'Sharpe': metrics.sharpe,
'Max DD': metrics.max_drawdown
}
import pandas as pd
metrics_df = pd.DataFrame(performance_metrics).T
print("Performance Metrics via Chaining:")
print(metrics_df.round(3))
# Data pipeline with chaining
def create_analysis_pipeline(tickers, start_date, benchmark='SPY'):
"""Complete analysis pipeline using method chaining."""
# Core data
data = (ffn.get(tickers, start=start_date)
.dropna()
.rebase(100)
)
# Returns analysis
returns = (data
.to_returns()
.dropna()
)
# Add benchmark
benchmark_returns = (ffn.get(benchmark, start=start_date)
.to_returns()
.dropna()
)
benchmark_returns.columns = ['Benchmark']
# Final dataset
final_data = ffn.merge(returns, benchmark_returns)
return {
'prices': data,
'returns': final_data,
'correlations': final_data.corr(),
'performance': {col: ffn.calc_perf_stats(data[col] if col in data.columns
else ffn.to_price_index(final_data[col]))
for col in final_data.columns}
}
# Run pipeline
pipeline_results = create_analysis_pipeline(['AAPL', 'MSFT'], '2020-01-01')
print(f"Pipeline created {len(pipeline_results['performance'])} performance analyses")Install with Tessl CLI
npx tessl i tessl/pypi-ffn