Python interface for ERDDAP data servers that simplifies accessing scientific datasets
—
Search capabilities across multiple ERDDAP servers simultaneously with optional parallel processing. These functions allow you to discover datasets across the entire ERDDAP ecosystem rather than searching individual servers one by one.
Note: These functions must be imported directly from erddapy.multiple_server_search as they are not included in the main package exports.
Search multiple ERDDAP servers for datasets matching a query string using Google-like search syntax.
def search_servers(
query: str,
*,
servers_list: list[str] | None = None,
parallel: bool = False,
protocol: str = "tabledap"
) -> DataFrame:
"""
Search all servers for a query string.
Parameters:
- query: Search terms with Google-like syntax:
* Words separated by spaces (searches separately)
* "quoted phrases" for exact matches
* -excludedWord to exclude terms
* -"excluded phrase" to exclude phrases
* Partial word matching (e.g., "spee" matches "speed")
- servers_list: Optional list of server URLs. If None, searches all servers
- parallel: If True, uses joblib for parallel processing
- protocol: 'tabledap' or 'griddap'
Returns:
- pandas.DataFrame with columns: Title, Institution, Dataset ID, Server url
"""Usage Examples:
from erddapy.multiple_server_search import search_servers
# Basic search across all servers
results = search_servers("temperature salinity")
print(f"Found {len(results)} datasets")
print(results[['Title', 'Institution', 'Dataset ID']].head())
# Search for exact phrase
buoy_data = search_servers('"sea surface temperature"')
# Exclude certain terms
ocean_not_air = search_servers('temperature -air -atmospheric')
# Search specific servers only
coastal_servers = [
"http://erddap.secoora.org/erddap",
"http://www.neracoos.org/erddap"
]
coastal_results = search_servers(
"glider",
servers_list=coastal_servers
)
# Parallel search for faster results
large_search = search_servers(
"chlorophyll",
parallel=True
)
Advanced search with detailed constraint parameters for precise dataset discovery.
def advanced_search_servers(
servers_list: list[str] | None = None,
*,
parallel: bool = False,
protocol: str = "tabledap",
**kwargs
) -> DataFrame:
"""
Advanced search across multiple ERDDAP servers with constraints.
Parameters:
- servers_list: Optional list of server URLs. If None, searches all servers
- parallel: If True, uses joblib for parallel processing
- protocol: 'tabledap' or 'griddap'
- **kwargs: Search constraints including:
* search_for: Query string (same as search_servers)
* cdm_data_type, institution, ioos_category: Metadata filters
* keywords, long_name, standard_name, variableName: Variable filters
* minLon, maxLon, minLat, maxLat: Geographic bounds
* minTime, maxTime: Temporal bounds
* items_per_page, page: Pagination controls
Returns:
- pandas.DataFrame with matching datasets
"""Usage Examples:
from erddapy.multiple_server_search import advanced_search_servers
# Geographic and temporal constraints
gulf_data = advanced_search_servers(
search_for="temperature",
minLat=25.0,
maxLat=31.0,
minLon=-98.0,
maxLon=-80.0,
minTime="2020-01-01T00:00:00Z",
maxTime="2020-12-31T23:59:59Z",
parallel=True
)
# Filter by data type and institution
mooring_data = advanced_search_servers(
cdm_data_type="TimeSeries",
institution="NOAA",
ioos_category="Temperature"
)
# Search by variable characteristics
salinity_vars = advanced_search_servers(
standard_name="sea_water_salinity",
protocol="tabledap"
)
# GridDAP satellite data
satellite_sst = advanced_search_servers(
search_for="sea surface temperature satellite",
protocol="griddap",
cdm_data_type="Grid"
)
Internal function for processing search results from individual servers.
def fetch_results(
url: str,
key: str,
protocol: str
) -> dict[str, DataFrame] | None:
"""
Fetch search results from a single server.
Parameters:
- url: ERDDAP search URL
- key: Server identifier key
- protocol: 'tabledap' or 'griddap'
Returns:
- Dictionary with server key mapped to DataFrame, or None if server fails
"""The search functions return pandas DataFrames with standardized columns for easy analysis:
from erddapy.multiple_server_search import search_servers
import pandas as pd
# Perform search
results = search_servers("glider temperature", parallel=True)
# Analyze results
print("Search Results Summary:")
print(f"Total datasets found: {len(results)}")
print(f"Unique institutions: {results['Institution'].nunique()}")
print(f"Servers with data: {results['Server url'].nunique()}")
# Group by institution
by_institution = results.groupby('Institution').size().sort_values(ascending=False)
print("\nDatasets by Institution:")
print(by_institution.head(10))
# Find datasets from specific regions
secoora_data = results[results['Server url'].str.contains('secoora')]
print(f"\nSECOORA datasets: {len(secoora_data)}")
# Export results
results.to_csv('erddap_search_results.csv', index=False)
For faster searches across many servers, install joblib and use parallel processing:
pip install joblib
from erddapy.multiple_server_search import search_servers
# Enable parallel processing
results = search_servers(
"ocean color chlorophyll",
parallel=True # Uses all CPU cores
)
# Check performance difference
import time
start = time.time()
serial_results = search_servers("temperature", parallel=False)
serial_time = time.time() - start
start = time.time()
parallel_results = search_servers("temperature", parallel=True)
parallel_time = time.time() - start
print(f"Serial search: {serial_time:.2f} seconds")
print(f"Parallel search: {parallel_time:.2f} seconds")
print(f"Speedup: {serial_time/parallel_time:.1f}x")The multi-server search functions handle individual server failures gracefully:
from erddapy.multiple_server_search import search_servers
# Some servers may be offline or return errors
results = search_servers("salinity")
# Results automatically exclude failed servers
print(f"Collected results from available servers: {len(results)}")
# Check server availability by testing with small search
test_servers = [
"http://erddap.secoora.org/erddap",
"http://invalid-server.example.com/erddap", # This will fail
"https://gliders.ioos.us/erddap"
]
test_results = search_servers(
"test",
servers_list=test_servers
)
# Only includes results from working servers
Use search results to configure ERDDAP instances for data download:
from erddapy.multiple_server_search import search_servers
from erddapy import ERDDAP
# Search for specific datasets
results = search_servers("glider ru29")
if len(results) > 0:
# Use first result
dataset = results.iloc[0]
# Create ERDDAP instance for the server
e = ERDDAP(
server=dataset['Server url'],
protocol="tabledap"
)
# Set the dataset ID
e.dataset_id = dataset['Dataset ID']
# Download the data
df = e.to_pandas(response="csv")
print(f"Downloaded {len(df)} records from {dataset['Title']}")Install with Tessl CLI
npx tessl i tessl/pypi-erddapy