Google Cloud Natural Language API client library providing sentiment analysis, entity recognition, text classification, and content moderation capabilities.

Analyzes the emotional tone and attitude in text content, providing sentiment scores and magnitude measurements to understand how positive, negative, or neutral the text is. Sentiment analysis is useful for understanding customer feedback, social media content, reviews, and any text where emotional context matters.

Performs sentiment analysis on the provided text, returning overall document sentiment and per-sentence sentiment scores.
def analyze_sentiment(
    self,
    request: Optional[Union[AnalyzeSentimentRequest, dict]] = None,
    *,
    document: Optional[Document] = None,
    encoding_type: Optional[EncodingType] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Union[float, object] = gapic_v1.method.DEFAULT,
    metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> AnalyzeSentimentResponse:
    """Analyze the sentiment of the provided text.

    Args:
        request: The request object containing the document and options.
        document: Input document for analysis.
        encoding_type: Text encoding type for offset calculations.
        retry: Retry configuration for the request.
        timeout: Request timeout in seconds.
        metadata: Additional metadata to send with the request.

    Returns:
        AnalyzeSentimentResponse containing overall document sentiment
        and per-sentence sentiment analysis results.
    """

from google.cloud import language
# Initialize client
client = language.LanguageServiceClient()
# Create document
document = language.Document(
content="I love this product! It's absolutely fantastic and works perfectly.",
type_=language.Document.Type.PLAIN_TEXT
)
# Analyze sentiment
response = client.analyze_sentiment(
request={"document": document}
)
# Access overall document sentiment
document_sentiment = response.document_sentiment
print(f"Document sentiment score: {document_sentiment.score}")
print(f"Document sentiment magnitude: {document_sentiment.magnitude}")
# Access per-sentence sentiment
for i, sentence in enumerate(response.sentences):
print(f"Sentence {i}: {sentence.text.content}")
print(f" Sentiment score: {sentence.sentiment.score}")
print(f" Sentiment magnitude: {sentence.sentiment.magnitude}")class AnalyzeSentimentRequest:
document: Document
encoding_type: EncodingTypeclass AnalyzeSentimentResponse:
document_sentiment: Sentiment
language: str
sentences: MutableSequence[Sentence]Represents sentiment analysis results with score and magnitude values.
class Sentiment:
    magnitude: float  # Range: [0.0, +inf) - Emotional intensity
    score: float  # Range: [-1.0, 1.0] - Positive/negative polarity

# Score Interpretation:
# Magnitude Interpretation:
# NOTE(review): the interpretation tables for score and magnitude were lost
# in extraction — consult the upstream Natural Language API documentation.
# Represents an individual sentence with its sentiment analysis.
class Sentence:
    # The sentence text and its position in the document.
    text: TextSpan
    # Sentiment computed for this sentence alone.
    sentiment: Sentiment


# Represents a span of text with content and position information.
class TextSpan:
    # Raw text content of the span.
    content: str
    # Offset of the span start, in units of the requested encoding_type.
    begin_offset: int


def analyze_multiple_texts(client, texts):
    """Analyze sentiment for multiple text samples.

    Args:
        client: A ``LanguageServiceClient`` instance.
        texts: Iterable of plain-text strings to analyze.

    Returns:
        A list of dicts, one per input text, with ``text``, ``score``
        and ``magnitude`` keys.
    """
    results = []
    for text in texts:
        document = language.Document(
            content=text,
            type_=language.Document.Type.PLAIN_TEXT,
        )
        # One API round-trip per text sample.
        response = client.analyze_sentiment(
            request={"document": document}
        )
        results.append({
            'text': text,
            'score': response.document_sentiment.score,
            'magnitude': response.document_sentiment.magnitude,
        })
    return results
# Usage
texts = [
    "This is amazing!",
    "I hate this product.",
    "It's okay, nothing special.",
]
results = analyze_multiple_texts(client, texts)
for result in results:
    print(f"Text: {result['text']}")
    print(f"Score: {result['score']}, Magnitude: {result['magnitude']}")

# Analyze sentiment in HTML content
html_content = """
<html>
<body>
<h1>Product Review</h1>
<p>This product is <strong>absolutely wonderful</strong>!
I would definitely recommend it to anyone.</p>
</body>
</html>
"""

# Type.HTML makes the API strip markup before analysis.
document = language.Document(
    content=html_content,
    type_=language.Document.Type.HTML,
)
response = client.analyze_sentiment(
    request={"document": document}
)
print(f"HTML sentiment score: {response.document_sentiment.score}")

# When you need character offset information for UTF-16 encoded text
document = language.Document(
    content="感情分析のテスト",  # Japanese text
    type_=language.Document.Type.PLAIN_TEXT,
)
response = client.analyze_sentiment(
    request={
        "document": document,
        "encoding_type": language.EncodingType.UTF16,
    }
)
# Offsets will be calculated based on UTF-16 encoding
for sentence in response.sentences:
    print(f"Sentence: {sentence.text.content}")
    print(f"UTF-16 offset: {sentence.text.begin_offset}")

from google.api_core import exceptions
try:
    response = client.analyze_sentiment(
        request={"document": document},
        timeout=10.0,
    )
except exceptions.InvalidArgument as e:
    print(f"Invalid request: {e}")
except exceptions.DeadlineExceeded:
    print("Request timed out")
except exceptions.GoogleAPIError as e:
    # GoogleAPIError is the broad base class; keep it last so the more
    # specific handlers above take precedence.
    print(f"API error: {e}")

# The API automatically detects language but works best with:
# NOTE(review): the supported-language list was lost in extraction —
# consult the upstream Natural Language API documentation.
# Specify document language for better accuracy:
document = language.Document(
    content="Esto es fantástico!",
    type_=language.Document.Type.PLAIN_TEXT,
    language="es",  # Spanish
)

# Install with Tessl CLI:
#   npx tessl i tessl/pypi-google-cloud-language