CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-yellowbrick

A suite of visual analysis and diagnostic tools for machine learning.

Overview
Eval results
Files

docs/text.md

Text Analysis

Specialized visualizers for text analysis and natural language processing, providing tools for exploring text corpora, visualizing document embeddings, and analyzing linguistic patterns. These visualizers support various NLP workflows and text preprocessing pipelines.

Capabilities

Text Embeddings Visualization

High-dimensional text embedding visualization using dimensionality reduction techniques like t-SNE and UMAP for exploring document similarity and clustering patterns.

# API stub: documents the public signature only — the implementation lives in
# the yellowbrick.text package, so method bodies are intentionally elided.
class TSNEVisualizer(Visualizer):
    """
    t-SNE visualization for text embeddings and high-dimensional data.
    
    Parameters:
    - labels: list, text labels for data points
    - classes: list, class labels for coloring
    - random_state: int, random state for reproducibility
    - perplexity: float, t-SNE perplexity parameter
    - early_exaggeration: float, early exaggeration parameter
    - learning_rate: float, learning rate parameter
    - n_iter: int, number of iterations
    - metric: str, distance metric
    """
    def __init__(self, labels=None, classes=None, random_state=None, perplexity=30.0, early_exaggeration=12.0, learning_rate=200.0, n_iter=1000, metric='euclidean', **kwargs): ...
    # X: (n_samples, n_features) embedding matrix; y: optional class targets for coloring.
    def fit(self, X, y=None, **kwargs): ...
    # Render the finished figure.
    def show(self, **kwargs): ...

# API stub: signature documentation only; implementation is in yellowbrick.text.
# NOTE(review): UMAP support presumably requires the optional umap-learn
# dependency — confirm against the yellowbrick install docs.
class UMAPVisualizer(Visualizer):
    """
    UMAP visualization for text embeddings and high-dimensional data.
    
    Parameters:
    - labels: list, text labels for data points
    - classes: list, class labels for coloring
    - random_state: int, random state for reproducibility
    - n_neighbors: int, number of neighbors parameter
    - min_dist: float, minimum distance parameter
    - metric: str, distance metric
    """
    def __init__(self, labels=None, classes=None, random_state=None, n_neighbors=15, min_dist=0.1, metric='euclidean', **kwargs): ...
    # X: (n_samples, n_features) embedding matrix; y: optional class targets for coloring.
    def fit(self, X, y=None, **kwargs): ...
    # Render the finished figure.
    def show(self, **kwargs): ...

# Functional one-shot wrapper around TSNEVisualizer; extra keyword arguments
# are forwarded to the visualizer's constructor via **kwargs.
def tsne(X, y=None, labels=None, classes=None, **kwargs):
    """
    Functional API for t-SNE visualization.
    
    Parameters:
    - X: feature matrix (document embeddings)
    - y: target vector (optional)
    - labels: list, text labels for data points
    - classes: list, class labels
    
    Returns:
    TSNEVisualizer instance
    """

# Functional one-shot wrapper around UMAPVisualizer; extra keyword arguments
# are forwarded to the visualizer's constructor via **kwargs.
def umap(X, y=None, labels=None, classes=None, **kwargs):
    """
    Functional API for UMAP visualization.
    
    Parameters:
    - X: feature matrix (document embeddings)
    - y: target vector (optional)
    - labels: list, text labels for data points
    - classes: list, class labels
    
    Returns:
    UMAPVisualizer instance
    """

Usage Example:

from yellowbrick.text import TSNEVisualizer, UMAPVisualizer, tsne, umap
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.datasets import fetch_20newsgroups

# Load text data
categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
newsgroups = fetch_20newsgroups(subset='train', categories=categories)
corpus = newsgroups.data

# Vectorize text
vectorizer = TfidfVectorizer(max_features=1000, stop_words='english')
X = vectorizer.fit_transform(corpus)

# t-SNE visualization. `classes` takes the class names for the legend.
# The original example also passed target_names as `labels=`, but per the
# API above `labels` is per-data-point text labels, so a 4-element list of
# class names is the wrong shape there and has been dropped.
tsne_viz = TSNEVisualizer(classes=newsgroups.target_names)
tsne_viz.fit(X.toarray(), newsgroups.target)
tsne_viz.show()

# UMAP visualization
umap_viz = UMAPVisualizer(classes=newsgroups.target_names)
umap_viz.fit(X.toarray(), newsgroups.target)
umap_viz.show()

# Functional API
tsne(X.toarray(), newsgroups.target, classes=newsgroups.target_names)
umap(X.toarray(), newsgroups.target, classes=newsgroups.target_names)

Frequency Distribution Analysis

Word and token frequency distribution visualization for understanding vocabulary characteristics and identifying important terms in text corpora.

# API stub: signature documentation only; implementation is in yellowbrick.text.
class FreqDistVisualizer(Visualizer):
    """
    Frequency distribution visualizer for text analysis.
    
    Parameters:
    - features: list, feature names (words/tokens)
    - n: int, number of top features to display
    - orient: str, orientation ('h' for horizontal, 'v' for vertical)
    """
    def __init__(self, features=None, n=50, orient='h', **kwargs): ...
    # NOTE(review): upstream yellowbrick fits on the document-term count
    # matrix (as produced by CountVectorizer), not raw text — confirm.
    def fit(self, corpus, **kwargs): ...
    # Render the finished figure.
    def show(self, **kwargs): ...

# Functional one-shot wrapper around FreqDistVisualizer; extra keyword
# arguments are forwarded to the visualizer's constructor via **kwargs.
def freqdist(corpus, features=None, n=50, **kwargs):
    """
    Functional API for frequency distribution visualization.
    
    Parameters:
    - corpus: text corpus or frequency data
    - features: list, feature names
    - n: int, number of top features to display
    
    Returns:
    FreqDistVisualizer instance
    """

Usage Example:

from yellowbrick.text import FreqDistVisualizer, freqdist
from sklearn.feature_extraction.text import CountVectorizer
from collections import Counter
import re

# Prepare text data
documents = [
    "The quick brown fox jumps over the lazy dog",
    "A journey of a thousand miles begins with a single step",
    "To be or not to be that is the question"
]

# Method 1: Using CountVectorizer
vectorizer = CountVectorizer(stop_words='english')
X = vectorizer.fit_transform(documents)
features = vectorizer.get_feature_names_out()

# FreqDistVisualizer consumes the document-term count matrix directly and
# sums the term frequencies itself. The original example built an
# intermediate {word: count} dict and fit on that, which does not match the
# visualizer's documented input for a vectorized corpus.
viz = FreqDistVisualizer(features=features)
viz.fit(X)
viz.show()

# Method 2: Using raw text with Counter
text = ' '.join(documents).lower()
words = re.findall(r'\b\w+\b', text)
word_counts = Counter(words)

freqdist(word_counts, n=20)

Part-of-Speech Analysis

Part-of-speech tag distribution visualization for analyzing grammatical patterns and linguistic structure in text corpora.

# API stub: signature documentation only; implementation is in yellowbrick.text.
class PosTagVisualizer(Visualizer):
    """
    Part-of-speech tag visualizer for linguistic analysis.
    
    Parameters:
    - tagset: str, POS tagset to use ('universal', 'penn')
    - colormap: str, matplotlib colormap for bars
    """
    def __init__(self, tagset='universal', colormap='Set2', **kwargs): ...
    # NOTE(review): upstream yellowbrick expects the corpus nested as
    # documents -> sentences -> (token, tag) tuples — confirm.
    def fit(self, corpus, **kwargs): ...
    # Render the finished figure.
    def show(self, **kwargs): ...

Usage Example:

from yellowbrick.text import PosTagVisualizer
import nltk
from nltk import pos_tag, word_tokenize

# Download required NLTK data
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('universal_tagset')

# Prepare text data
documents = [
    "The quick brown fox jumps over the lazy dog",
    "Natural language processing is fascinating",
    "Machine learning algorithms can analyze text effectively"
]

# Tokenize and tag. PosTagVisualizer expects a nested corpus:
# documents -> sentences -> (token, tag) tuples. The original flattened all
# tags into a single list with extend(), losing that structure.
tagged_corpus = []
for doc in documents:
    tokens = word_tokenize(doc.lower())
    tags = pos_tag(tokens, tagset='universal')
    tagged_corpus.append([tags])  # each document holds a single sentence here

# Visualize POS distribution
pos_viz = PosTagVisualizer(tagset='universal')
pos_viz.fit(tagged_corpus)
pos_viz.show()

Word Dispersion Plot

Word dispersion visualization showing the distribution of specific words throughout a text corpus, useful for analyzing word usage patterns and document structure.

# API stub: signature documentation only; implementation is in yellowbrick.text.
class DispersionPlot(Visualizer):
    """
    Word dispersion plot for analyzing word distribution in text.
    
    Parameters:
    - words: list, target words to analyze
    - labels: list, labels for documents or text segments
    - ignore_case: bool, whether to ignore case differences
    """
    def __init__(self, words, labels=None, ignore_case=True, **kwargs): ...
    # NOTE(review): upstream yellowbrick fits on tokenized documents
    # (lists of words), not raw strings — confirm.
    def fit(self, corpus, **kwargs): ...
    # Render the finished figure.
    def show(self, **kwargs): ...

# Functional one-shot wrapper around DispersionPlot; extra keyword arguments
# are forwarded to the visualizer's constructor via **kwargs.
def dispersion(corpus, words, labels=None, **kwargs):
    """
    Functional API for word dispersion visualization.
    
    Parameters:
    - corpus: text corpus or list of documents
    - words: list, target words to analyze
    - labels: list, document labels
    
    Returns:
    DispersionPlot instance
    """

Usage Example:

from yellowbrick.text import DispersionPlot, dispersion

# Sample text corpus
corpus = [
    "The data science field is rapidly evolving with machine learning",
    "Machine learning algorithms require large datasets for training",
    "Data analysis and data visualization are key data science skills",
    "Python and R are popular programming languages for data science",
    "Deep learning is a subset of machine learning with neural networks"
]

# Target words to analyze
target_words = ['data', 'machine', 'learning', 'science']

# DispersionPlot works on tokenized documents (lists of words), not raw
# strings, so tokenize first. Case differences are already handled by the
# default ignore_case=True.
tokenized_corpus = [doc.split() for doc in corpus]

# Create dispersion plot
dispersion_viz = DispersionPlot(words=target_words)
dispersion_viz.fit(tokenized_corpus)
dispersion_viz.show()

# Functional API
dispersion(tokenized_corpus, target_words)

Word Correlation Analysis

Word correlation visualization for understanding relationships between words and identifying semantic clusters in text data.

# API stub: signature documentation only; implementation is in yellowbrick.text.
class WordCorrelationPlot(Visualizer):
    """
    Word correlation plot for analyzing semantic relationships.
    
    Parameters:
    - words: list, words to analyze correlations
    - method: str, correlation method ('pearson', 'spearman')
    - colormap: str, matplotlib colormap for heatmap
    """
    def __init__(self, words=None, method='pearson', colormap='RdYlBu_r', **kwargs): ...
    # X: document-term matrix restricted to the words of interest.
    def fit(self, X, **kwargs): ...
    # Render the finished figure.
    def show(self, **kwargs): ...

# Functional one-shot wrapper around WordCorrelationPlot; extra keyword
# arguments are forwarded to the visualizer's constructor via **kwargs.
def word_correlation(X, words=None, method='pearson', **kwargs):
    """
    Functional API for word correlation visualization.
    
    Parameters:
    - X: document-term matrix
    - words: list, words to analyze
    - method: str, correlation method
    
    Returns:
    WordCorrelationPlot instance
    """

Usage Example:

from yellowbrick.text import WordCorrelationPlot, word_correlation
from sklearn.feature_extraction.text import TfidfVectorizer

# Sample documents
documents = [
    "machine learning algorithms process data efficiently",
    "data science involves statistical analysis and visualization",
    "artificial intelligence and machine learning are related fields",
    "deep learning uses neural networks for pattern recognition",
    "data analysis requires statistical knowledge and programming skills"
]

# Vectorize documents
vectorizer = TfidfVectorizer(max_features=20, stop_words='english')
X = vectorizer.fit_transform(documents)
feature_names = vectorizer.get_feature_names_out()

# Restrict the correlation analysis to a handful of words of interest.
target_words = ['machine', 'learning', 'data', 'analysis', 'statistical']
word_indices = [i for i, word in enumerate(feature_names) if word in target_words]

# Extract the matching columns of the document-term matrix.
# (The original example imported pandas here but never used it.)
X_subset = X.toarray()[:, word_indices]
subset_words = [feature_names[i] for i in word_indices]

# Create correlation plot
corr_viz = WordCorrelationPlot(words=subset_words, method='pearson')
corr_viz.fit(X_subset)
corr_viz.show()

# Functional API (Spearman rank correlation this time)
word_correlation(X_subset, words=subset_words, method='spearman')

Usage Patterns

Comprehensive Text Analysis Pipeline

from yellowbrick.text import TSNEVisualizer, FreqDistVisualizer, DispersionPlot
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.datasets import fetch_20newsgroups
import matplotlib.pyplot as plt

# Load text dataset
categories = ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']
newsgroups = fetch_20newsgroups(subset='train', categories=categories, remove=('headers', 'footers', 'quotes'))
corpus = newsgroups.data[:1000]  # Use subset for faster processing
target = newsgroups.target[:1000]
target_names = list(newsgroups.target_names)  # simpler than re-indexing by position

# Step 1: Frequency analysis
print("Step 1: Word frequency analysis")
count_vectorizer = CountVectorizer(max_features=100, stop_words='english', min_df=2)
count_matrix = count_vectorizer.fit_transform(corpus)
feature_names = count_vectorizer.get_feature_names_out()

# FreqDistVisualizer takes the document-term matrix itself; the
# {word: count} dict is kept only so Step 3 can pick the top words.
word_frequencies = count_matrix.sum(axis=0).A1
freq_data = dict(zip(feature_names, word_frequencies))
freq_viz = FreqDistVisualizer(features=feature_names, n=30)
freq_viz.fit(count_matrix)
freq_viz.show()

# Step 2: Document embedding visualization
print("Step 2: Document embedding visualization")
tfidf_vectorizer = TfidfVectorizer(max_features=500, stop_words='english', min_df=2, max_df=0.8)
tfidf_matrix = tfidf_vectorizer.fit_transform(corpus)

# t-SNE visualization
tsne_viz = TSNEVisualizer(classes=target_names, random_state=42)
tsne_viz.fit(tfidf_matrix.toarray(), target)
tsne_viz.show()

# Step 3: Word dispersion analysis
print("Step 3: Word dispersion analysis")
# Select the eight most frequent words for dispersion analysis.
top_words = sorted(freq_data.items(), key=lambda x: x[1], reverse=True)[:8]
dispersion_words = [word for word, _ in top_words]

# DispersionPlot expects tokenized documents (lists of words), not raw strings.
dispersion_viz = DispersionPlot(words=dispersion_words)
dispersion_viz.fit([doc.split() for doc in corpus])
dispersion_viz.show()

Comparative Text Analysis

from yellowbrick.text import TSNEVisualizer, UMAPVisualizer
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt

# Compare t-SNE and UMAP embeddings of the same TF-IDF features side by side.
# (`corpus`, `target`, and `target_names` carry over from the pipeline above.)
tfidf = TfidfVectorizer(max_features=1000, stop_words='english')
embeddings = tfidf.fit_transform(corpus).toarray()

# One shared figure with two axes, one per projection method.
fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(15, 6))

for ax, title, visualizer_cls in (
    (left_ax, 't-SNE Embedding', TSNEVisualizer),
    (right_ax, 'UMAP Embedding', UMAPVisualizer),
):
    viz = visualizer_cls(classes=target_names, ax=ax, random_state=42)
    viz.fit(embeddings, target)
    viz.finalize()  # finalize (not show) so both panels render into one figure
    ax.set_title(title)

plt.tight_layout()
plt.show()

Topic Modeling Visualization

from yellowbrick.text import TSNEVisualizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np

# Build a bag-of-words representation of the corpus for topic modeling.
bow_vectorizer = CountVectorizer(max_features=1000, stop_words='english', min_df=2, max_df=0.8)
bow_matrix = bow_vectorizer.fit_transform(corpus)

# Fit a 4-topic LDA model and project every document into topic space.
n_topics = 4
lda = LatentDirichletAllocation(n_components=n_topics, random_state=42)
doc_topics = lda.fit_transform(bow_matrix)

# Label each document with its highest-probability topic.
dominant = np.argmax(doc_topics, axis=1)
topic_labels = [f'Topic {i}' for i in range(n_topics)]

# Visualize the documents in topic space, colored by dominant topic.
viz = TSNEVisualizer(classes=topic_labels, random_state=42)
viz.fit(doc_topics, dominant)
viz.show()

# Report the ten highest-weighted words for each topic.
vocab = bow_vectorizer.get_feature_names_out()
for idx, weights in enumerate(lda.components_):
    strongest = [vocab[i] for i in np.argsort(weights)[::-1][:10]]
    print(f"Topic {idx}: {', '.join(strongest)}")

Multilingual Text Analysis

from yellowbrick.text import FreqDistVisualizer, TSNEVisualizer
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
import re

# Sample multilingual text (English and Spanish)
multilingual_corpus = [
    "machine learning is transforming technology",
    "el aprendizaje automático está transformando la tecnología",
    "data science involves statistical analysis",
    "la ciencia de datos involucra análisis estadístico",
    "artificial intelligence enables automation",
    "la inteligencia artificial permite la automatización"
]

# Language labels
languages = ['English', 'Spanish', 'English', 'Spanish', 'English', 'Spanish']

# Character 2-3-grams discriminate languages without any word lists.
char_vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(2, 3), max_features=100)
char_matrix = char_vectorizer.fit_transform(multilingual_corpus)

# Visualize how the documents cluster by language (0 = English, 1 = Spanish).
lang_codes = [0 if lang == 'English' else 1 for lang in languages]
tsne_viz = TSNEVisualizer(classes=['English', 'Spanish'], random_state=42)
tsne_viz.fit(char_matrix.toarray(), lang_codes)
tsne_viz.show()

# Group the documents by language for per-language frequency analysis.
docs_by_language = {'English': [], 'Spanish': []}
for doc, lang in zip(multilingual_corpus, languages):
    docs_by_language[lang].append(doc)

for lang in ('English', 'Spanish'):
    print(f"\n{lang} word frequencies:")
    tokens = re.findall(r'\b\w+\b', ' '.join(docs_by_language[lang]).lower())
    token_counts = Counter(tokens)

    freq_viz = FreqDistVisualizer(n=10)
    freq_viz.fit(token_counts)
    freq_viz.show()

Install with Tessl CLI

npx tessl i tessl/pypi-yellowbrick

docs

classification.md

clustering.md

data-utilities.md

features.md

index.md

model-selection.md

regression.md

text.md

tile.json