Financial functions for Python providing performance analysis, risk metrics, portfolio optimization, and data retrieval for quantitative finance.

FFN automatically extends pandas Series and DataFrame objects with financial analysis methods, enabling seamless method chaining and functional composition. All FFN functions become available as methods on pandas objects when FFN is imported.

Core functionality that adds FFN methods to pandas objects automatically:
def extend_pandas():
    """
    Extend pandas Series and DataFrame with all FFN functions as methods.

    Called automatically when FFN is imported, so no explicit setup is
    required. Enables method chaining like:

        prices.to_returns().calc_sharpe()

    Returns:
        None (modifies the pandas Series/DataFrame classes in-place)
    """

# When FFN is imported, all the following methods become available on
# pandas Series and DataFrame objects:
# Convert between prices and returns
Series.to_returns() # Convert prices to simple returns
Series.to_log_returns() # Convert prices to log returns
Series.to_price_index(start=100) # Convert returns to price index
Series.rebase(value=100) # Rebase series to specified value
DataFrame.rebase(value=100) # Rebase all columns to specified value

# Performance statistics and analysis
Series.calc_perf_stats(rf=0.0, annualization_factor=252) # Create PerformanceStats object
DataFrame.calc_stats() # Create GroupStats object for multiple series
Series.calc_cagr() # Calculate Compound Annual Growth Rate
Series.calc_total_return() # Calculate total return

# Risk and drawdown analysis
Series.calc_max_drawdown() # Calculate maximum drawdown
Series.to_drawdown_series() # Convert to drawdown series
DataFrame.to_drawdown_series() # Calculate drawdowns for all columns
Series.calc_sharpe(rf=0.0, nperiods=None, annualize=True) # Sharpe ratio
DataFrame.calc_sharpe(rf=0.0, nperiods=None, annualize=True) # Sharpe for all columns
Series.calc_sortino_ratio(rf=0.0, nperiods=None, annualize=True) # Sortino ratio
Series.calc_calmar_ratio() # Calmar ratio
Series.to_ulcer_index() # Ulcer Index calculation
Series.to_ulcer_performance_index(rf=0.0, nperiods=None) # Ulcer Performance Index

# Portfolio weight calculation methods
DataFrame.calc_inv_vol_weights() # Inverse volatility weights
DataFrame.calc_mean_var_weights(weight_bounds=(0.0, 1.0), rf=0.0, covar_method="ledoit-wolf", options=None) # Mean-variance weights
DataFrame.calc_erc_weights(initial_weights=None, risk_weights=None, covar_method="ledoit-wolf", risk_parity_method="ccd", maximum_iterations=100, tolerance=1e-8) # Equal risk contribution weights

# Advanced statistical methods
DataFrame.calc_clusters(n=None, plot=False) # K-means clustering
DataFrame.calc_ftca(threshold=0.5) # Fast Threshold Clustering Algorithm
Series.calc_prob_mom(other_returns) # Probabilistic momentum
DataFrame.resample_returns(func, seed=0, num_trials=100) # Bootstrap resampling

# Data transformation and processing
Series.to_monthly(method="ffill", how="end") # Convert to monthly frequency
DataFrame.to_monthly(method="ffill", how="end") # Convert all columns to monthly
Series.asfreq_actual(freq, method="ffill", how="end", normalize=False) # Custom frequency conversion
DataFrame.drop_duplicate_cols() # Remove duplicate columns
Series.winsorize(axis=0, limits=0.01) # Winsorize outliers
DataFrame.winsorize(axis=0, limits=0.01) # Winsorize all columns
Series.rescale(min=0.0, max=1.0, axis=0) # Rescale to range
DataFrame.rescale(min=0.0, max=1.0, axis=0) # Rescale all columns
DataFrame.rollapply(window, fn) # Rolling function application

# Risk-adjusted return calculations
Series.to_excess_returns(rf, nperiods=None) # Calculate excess returns
DataFrame.to_excess_returns(rf, nperiods=None) # Excess returns for all columns
Series.calc_information_ratio(benchmark_returns) # Information ratio vs benchmark
Series.calc_risk_return_ratio() # Return/risk ratio (Sharpe without rf)

# Data formatting methods
Series.as_percent(digits=2) # Format as percentage strings
DataFrame.as_percent(digits=2) # Format all values as percentages
Series.as_format(format_str=".2f") # Apply custom format string
DataFrame.as_format(format_str=".2f") # Format all values with custom string

# Plotting and visualization
DataFrame.plot_heatmap(title="Heatmap", show_legend=True, show_labels=True, label_fmt=".2f", vmin=None, vmax=None, figsize=None, label_color="w", cmap="RdBu", **kwargs) # General heatmap
DataFrame.plot_corr_heatmap(**kwargs) # Correlation heatmap with sensible defaults

import ffn
import pandas as pd

# Fetch daily prices for three tickers (ffn is imported above).
prices = ffn.get('AAPL,MSFT,GOOGL', start='2020-01-01')

# Same pipeline as a chained expression, written step by step:
# normalize the series, derive simple returns, drop missing rows,
# then compute the risk-adjusted score.
rebased = prices.rebase(100)
daily_returns = rebased.to_returns().dropna()
analysis = daily_returns.calc_sharpe(rf=0.02)
print("Sharpe ratios via method chaining:")
print(analysis)

import ffn
# Build a reusable end-to-end performance pipeline.
def analyze_asset(ticker, start_date, benchmark='SPY'):
    """Complete performance analysis using pandas extensions.

    Downloads prices for the asset and a benchmark, then returns a dict
    with a PerformanceStats object, headline risk metrics, and the
    correlation matrix of the combined return streams.
    """
    # Pull price history for both the asset and the benchmark.
    asset_prices = ffn.get(ticker, start=start_date)
    benchmark_prices = ffn.get(benchmark, start=start_date)

    # Full performance summary on the asset's first column as a Series.
    asset_series = asset_prices.iloc[:, 0]
    asset_performance = asset_series.calc_perf_stats(rf=0.02)

    # Headline risk numbers via the pandas extensions.
    asset_returns = asset_prices.to_returns().dropna()
    risk_metrics = {
        'Max Drawdown': asset_series.calc_max_drawdown(),
        'Sharpe Ratio': asset_returns.iloc[:, 0].calc_sharpe(rf=0.02),
        'Calmar Ratio': asset_series.calc_calmar_ratio()
    }

    # Benchmark-relative view: merge return streams and correlate.
    benchmark_returns = benchmark_prices.to_returns().dropna()
    combined_returns = ffn.merge(asset_returns, benchmark_returns)
    correlations = combined_returns.corr()

    return {
        'performance': asset_performance,
        'risk_metrics': risk_metrics,
        'correlations': correlations,
        'returns': combined_returns
    }

# Run the pipeline for Apple since the start of 2020.
results = analyze_asset('AAPL', '2020-01-01')
print(f"AAPL CAGR: {results['performance'].cagr:.2%}")
print(f"AAPL Sharpe: {results['risk_metrics']['Sharpe Ratio']:.3f}")

import ffn
import numpy as np
import pandas as pd  # required below for pd.Series / pd.DataFrame

# Multi-asset portfolio construction across equities and bonds.
assets = ['SPY', 'QQQ', 'IWM', 'EFA', 'EEM', 'BND']
prices = ffn.get(assets, start='2020-01-01')
returns = prices.to_returns().dropna()

# Candidate weighting schemes computed via the pandas extensions.
equal_weights = pd.Series(1/len(assets), index=returns.columns)
inv_vol_weights = returns.calc_inv_vol_weights()
erc_weights = returns.calc_erc_weights()
mean_var_weights = returns.calc_mean_var_weights(rf=0.02)

# Portfolio performance comparison
portfolios = {
    'Equal Weight': equal_weights,
    'Inverse Vol': inv_vol_weights,
    'Risk Parity': erc_weights,
    'Mean-Variance': mean_var_weights
}

# For each scheme: portfolio returns -> price index -> summary stats.
portfolio_analysis = {}
for name, weights in portfolios.items():
    # Weighted sum of asset returns gives the portfolio return series.
    portfolio_returns = (returns * weights).sum(axis=1)
    portfolio_prices = portfolio_returns.to_price_index(start=100)
    # Performance analysis via extensions
    perf_stats = portfolio_prices.calc_perf_stats(rf=0.02)
    portfolio_analysis[name] = {
        'weights': weights,
        'cagr': perf_stats.cagr,
        'sharpe': perf_stats.daily_sharpe,
        'max_dd': perf_stats.max_drawdown,
        'calmar': perf_stats.calmar
    }

# Display results as one row per weighting scheme.
results_df = pd.DataFrame(portfolio_analysis).T
results_df[['cagr', 'sharpe', 'max_dd', 'calmar']].round(3)

import ffn
# Complex data processing using method chaining.
raw_data = ffn.get(['AAPL', 'MSFT', 'GOOGL', 'AMZN', 'TSLA'], start='2020-01-01')

# Multi-step processing pipeline. NOTE: to_monthly's fill method must be a
# valid pandas fill method; the documented default is "ffill" — 'last' (as
# originally written) is not a recognized fill value.
processed_data = (raw_data
    .drop_duplicate_cols()        # Remove any duplicate columns
    .dropna()                     # Remove NaN values
    .rebase(100)                  # Normalize to 100
    .to_monthly(method='ffill')   # Convert to monthly frequency
    .to_returns()                 # Calculate returns
    .winsorize(limits=0.02)       # Winsorize extreme values
    .dropna()                     # Clean again
)
print(f"Processed data shape: {processed_data.shape}")
print(f"Monthly return statistics:")
print(processed_data.describe().round(4))

# Risk analysis on the processed (monthly-return) data.
risk_analysis = {
    'Sharpe Ratios': processed_data.calc_sharpe(rf=0.02),
    'Correlations': processed_data.corr(),
    'Drawdowns': processed_data.to_price_index().to_drawdown_series()
}

# Visualization using extensions (renders a matplotlib heatmap).
processed_data.corr().plot_corr_heatmap(title='Tech Stock Correlations (Monthly)',
                                        figsize=(8, 6))

# Advanced statistical analysis: K-means clustering of the assets.
clusters = processed_data.calc_clusters(n=2, plot=True)
print(f"Asset clusters: {clusters}")

import ffn
import numpy as np   # required for np.sqrt below
import pandas as pd  # required for pd.DataFrame below

# Get data and calculate statistics.
prices = ffn.get(['SPY', 'QQQ', 'BND'], start='2020-01-01')
returns = prices.to_returns().dropna()

# Key metrics via extensions; sqrt(252) annualizes daily volatility
# (252 trading days per year).
metrics = pd.DataFrame({
    'CAGR': [prices[col].calc_cagr() for col in prices.columns],
    'Volatility': returns.std() * np.sqrt(252),
    'Sharpe': returns.calc_sharpe(rf=0.02),
    'Max Drawdown': [prices[col].calc_max_drawdown() for col in prices.columns],
    'Calmar': [prices[col].calc_calmar_ratio() for col in prices.columns]
}, index=prices.columns)

# Format for presentation using extensions.
print("Formatted Performance Summary:")
print("CAGR and Max Drawdown (as percentages):")
print(metrics[['CAGR', 'Max Drawdown']].as_percent())
# Label corrected to match the '.3f' format actually applied.
print("\nAll metrics (3 decimal places):")
print(metrics.as_format('.3f'))

# Create formatted correlation matrix.
correlations = returns.corr()
print("\nCorrelation Matrix (as percentages):")
print(correlations.as_format('.1%'))

The pandas extensions are applied automatically when `import ffn` is executed.

Install with Tessl CLI:
npx tessl i tessl/pypi-ffn