Google Cloud Natural Language API client library providing sentiment analysis, entity recognition, text classification, and content moderation capabilities
npx @tessl/cli install tessl/pypi-google-cloud-language@2.17.0

A comprehensive Python client library for Google Cloud Natural Language API that provides natural language understanding technologies to developers. The library enables developers to perform advanced text analysis operations including sentiment analysis for determining emotional tone, entity recognition and analysis for identifying people, places, organizations and other entities, content classification for categorizing text into predefined categories, syntax analysis for understanding grammatical structure, and content moderation for detecting harmful content.
pip install google-cloud-language

from google.cloud import language

For specific API versions:
from google.cloud import language_v1
from google.cloud import language_v1beta2
from google.cloud import language_v2

Import specific client classes:
from google.cloud.language import LanguageServiceClient
from google.cloud.language import LanguageServiceAsyncClient

Import for type annotations:
from typing import Optional, Union, Sequence, Tuple, MutableMapping, MutableSequence
from google.api_core import gapic_v1
from google.api_core.retry import OptionalRetry

from google.cloud import language
# Initialize the client
client = language.LanguageServiceClient()
# Create a document object
document = language.Document(
content="Google Cloud Natural Language API is amazing!",
type_=language.Document.Type.PLAIN_TEXT
)
# Analyze sentiment
response = client.analyze_sentiment(
request={"document": document}
)
# Access results
sentiment = response.document_sentiment
print(f"Sentiment score: {sentiment.score}")
print(f"Sentiment magnitude: {sentiment.magnitude}")
# Analyze entities
entities_response = client.analyze_entities(
request={"document": document}
)
for entity in entities_response.entities:
    print(f"Entity: {entity.name}, Type: {entity.type_.name}")

The Google Cloud Language library is organized around three main API versions:
The library provides both synchronous and asynchronous clients, multiple transport options (gRPC, REST), and comprehensive error handling with Google Cloud authentication integration.
Core client classes for interacting with the Google Cloud Natural Language API, supporting both synchronous and asynchronous operations with configurable transport layers.
class LanguageServiceClient:
def __init__(self, *, credentials=None, transport=None, client_options=None, client_info=None): ...
class LanguageServiceAsyncClient:
def __init__(self, *, credentials=None, transport=None, client_options=None, client_info=None): ...

Analyzes the emotional tone and attitude in text content, providing sentiment scores and magnitude measurements to understand how positive, negative, or neutral the text is.
def analyze_sentiment(
self,
request: Optional[Union[AnalyzeSentimentRequest, dict]] = None,
*,
document: Optional[Document] = None,
encoding_type: Optional[EncodingType] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
) -> AnalyzeSentimentResponse: ...

Identifies and extracts named entities (people, places, organizations, etc.) from text, providing detailed information about each entity including type, salience, and mentions within the text.
def analyze_entities(
self,
request: Optional[Union[AnalyzeEntitiesRequest, dict]] = None,
*,
document: Optional[Document] = None,
encoding_type: Optional[EncodingType] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
) -> AnalyzeEntitiesResponse: ...

Combines entity recognition with sentiment analysis to determine the sentiment associated with each identified entity, useful for understanding opinions about specific people, places, or topics.
def analyze_entity_sentiment(
self,
request: Optional[Union[AnalyzeEntitySentimentRequest, dict]] = None,
*,
document: Optional[Document] = None,
encoding_type: Optional[EncodingType] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
) -> AnalyzeEntitySentimentResponse: ...

Provides linguistic analysis including part-of-speech tagging, dependency parsing, and token-level information to understand the grammatical structure and linguistic properties of text.
def analyze_syntax(
self,
request: Optional[Union[AnalyzeSyntaxRequest, dict]] = None,
*,
document: Optional[Document] = None,
encoding_type: Optional[EncodingType] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
) -> AnalyzeSyntaxResponse: ...

Categorizes text documents into predefined classification categories, enabling automated content organization and filtering based on subject matter and themes.
def classify_text(
self,
request: Optional[Union[ClassifyTextRequest, dict]] = None,
*,
document: Optional[Document] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
) -> ClassifyTextResponse: ...

Detects and flags potentially harmful, inappropriate, or unsafe content in text, providing moderation categories and confidence scores for content filtering applications.
def moderate_text(
self,
request: Optional[Union[ModerateTextRequest, dict]] = None,
*,
document: Optional[Document] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
) -> ModerateTextResponse: ...

Performs multiple types of analysis in a single API call for efficiency, allowing you to get sentiment, entities, syntax, classification, and moderation results simultaneously.
def annotate_text(
self,
request: Optional[Union[AnnotateTextRequest, dict]] = None,
*,
document: Optional[Document] = None,
features: Optional[AnnotateTextRequest.Features] = None,
encoding_type: Optional[EncodingType] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
) -> AnnotateTextResponse: ...

class AnalyzeSentimentRequest:
document: Document
encoding_type: EncodingType
class AnalyzeSentimentResponse:
document_sentiment: Sentiment
language: str
sentences: MutableSequence[Sentence]
class AnalyzeEntitiesRequest:
document: Document
encoding_type: EncodingType
class AnalyzeEntitiesResponse:
entities: MutableSequence[Entity]
language: str
class AnalyzeEntitySentimentRequest:
document: Document
encoding_type: EncodingType
class AnalyzeEntitySentimentResponse:
entities: MutableSequence[Entity]
language: str
class AnalyzeSyntaxRequest:
document: Document
encoding_type: EncodingType
class AnalyzeSyntaxResponse:
sentences: MutableSequence[Sentence]
tokens: MutableSequence[Token]
language: str
class ClassifyTextRequest:
document: Document
classification_model_options: ClassificationModelOptions
class ClassifyTextResponse:
categories: MutableSequence[ClassificationCategory]
class ModerateTextRequest:
document: Document
class ModerateTextResponse:
moderation_categories: MutableSequence[ClassificationCategory]
class AnnotateTextRequest:
document: Document
features: AnnotateTextRequest.Features
encoding_type: EncodingType
class Features:
extract_syntax: bool
extract_entities: bool
extract_document_sentiment: bool
extract_entity_sentiment: bool
classify_text: bool
moderate_text: bool
class AnnotateTextResponse:
sentences: MutableSequence[Sentence]
tokens: MutableSequence[Token]
entities: MutableSequence[Entity]
document_sentiment: Sentiment
language: str
categories: MutableSequence[ClassificationCategory]
moderation_categories: MutableSequence[ClassificationCategory]

class Document:
class Type(proto.Enum):
TYPE_UNSPECIFIED = 0
PLAIN_TEXT = 1
HTML = 2
content: str
gcs_content_uri: str
type_: Type
language: str

class Sentiment:
magnitude: float
score: float

class Entity:
class Type(proto.Enum):
UNKNOWN = 0
PERSON = 1
LOCATION = 2
ORGANIZATION = 3
EVENT = 4
WORK_OF_ART = 5
CONSUMER_GOOD = 6
OTHER = 7
PHONE_NUMBER = 9
ADDRESS = 10
DATE = 11
NUMBER = 12
PRICE = 13
name: str
type_: Type
metadata: MutableMapping[str, str]
salience: float
mentions: MutableSequence[EntityMention]
sentiment: Sentiment

class TextSpan:
content: str
begin_offset: int

class EntityMention:
class Type(proto.Enum):
TYPE_UNKNOWN = 0
PROPER = 1
COMMON = 2
text: TextSpan
type_: Type
sentiment: Sentiment
probability: float

class ClassificationCategory:
name: str
confidence: float

class Token:
text: TextSpan
part_of_speech: PartOfSpeech
dependency_edge: DependencyEdge
lemma: str

class PartOfSpeech:
class Tag(proto.Enum):
UNKNOWN = 0
ADJ = 1
ADP = 2
ADV = 3
CONJ = 4
DET = 5
NOUN = 6
NUM = 7
PRON = 8
PRT = 9
PUNCT = 10
VERB = 11
X = 12
AFFIX = 13
tag: Tag
aspect: Aspect
case: Case
form: Form
gender: Gender
mood: Mood
number: Number
person: Person
proper: Proper
reciprocity: Reciprocity
tense: Tense
voice: Voice

class DependencyEdge:
class Label(proto.Enum):
UNKNOWN = 0
ROOT = 54
NSUBJ = 28
DOBJ = 18
# ... (additional labels available)
head_token_index: int
label: Label

class ClassificationModelOptions:
class V1Model(proto.Message):
pass
class V2Model(proto.Message):
pass
v1_model: V1Model
v2_model: V2Model

class Sentence:
text: TextSpan
sentiment: Sentiment

class EncodingType(proto.Enum):
NONE = 0
UTF8 = 1
UTF16 = 2
UTF32 = 3