CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/maven-org-mongodb--mongodb-driver-sync

The MongoDB Synchronous Driver for Java providing blocking I/O patterns for database operations

Pending
Overview
Eval results
Files

docs/query-aggregation.md

Query and Aggregation

Advanced querying capabilities including complex filtering, sorting, projection, aggregation pipelines, MapReduce operations, and distinct value queries.

Capabilities

FindIterable Interface

Rich query interface providing filtering, sorting, projection, and cursor options for document retrieval.

/**
 * Interface for configuring and executing find queries.
 * Setter methods return {@code this} for fluent chaining; the query is sent to the
 * server lazily, when iteration begins.
 */
public interface FindIterable<TResult> extends MongoIterable<TResult> {
    /**
     * Sets the query filter to limit results
     * @param filter the query filter as Bson
     * @return FindIterable with applied filter
     */
    FindIterable<TResult> filter(Bson filter);

    /**
     * Sets the sort criteria for results
     * @param sort the sort specification as Bson
     * @return FindIterable with applied sort
     */
    FindIterable<TResult> sort(Bson sort);

    /**
     * Sets the projection to limit returned fields
     * @param projection the projection specification as Bson
     * @return FindIterable with applied projection
     */
    FindIterable<TResult> projection(Bson projection);

    /**
     * Sets the maximum number of documents to return
     * @param limit the maximum number of documents
     * @return FindIterable with applied limit
     */
    FindIterable<TResult> limit(int limit);

    /**
     * Sets the number of documents to skip
     * @param skip the number of documents to skip
     * @return FindIterable with applied skip
     */
    FindIterable<TResult> skip(int skip);

    /**
     * Sets the cursor type for the query
     * @param cursorType the cursor type (TAILABLE, TAILABLE_AWAIT, etc.)
     * @return FindIterable with specified cursor type
     */
    FindIterable<TResult> cursorType(CursorType cursorType);

    /**
     * Sets whether to allow disk use for large result sets
     * @param allowDiskUse true to allow disk use
     * @return FindIterable with disk use option
     */
    FindIterable<TResult> allowDiskUse(Boolean allowDiskUse);

    /**
     * Sets the maximum execution time
     * @param maxTime the maximum time
     * @param timeUnit the time unit
     * @return FindIterable with time limit
     */
    FindIterable<TResult> maxTime(long maxTime, TimeUnit timeUnit);

    /**
     * Sets the maximum await time for a tailable cursor between fetch attempts
     * (used with {@link #cursorType} TAILABLE_AWAIT — see usage examples below)
     * @param maxAwaitTime the maximum await time
     * @param timeUnit the time unit
     * @return FindIterable with await time limit
     */
    FindIterable<TResult> maxAwaitTime(long maxAwaitTime, TimeUnit timeUnit);

    /**
     * Provides a hint for which index the server should use for the query
     * @param hint the index hint as Bson
     * @return FindIterable with index hint
     */
    FindIterable<TResult> hint(Bson hint);

    /**
     * Prevents the server from timing out idle cursors
     * @param noCursorTimeout true to disable the server-side cursor timeout
     * @return FindIterable with cursor timeout option
     */
    FindIterable<TResult> noCursorTimeout(boolean noCursorTimeout);

    /**
     * Allows partial results from a sharded cluster if some shards are unavailable
     * @param partial true to accept partial results
     * @return FindIterable with partial results option
     */
    FindIterable<TResult> partial(boolean partial);

    /**
     * Sets collation for string comparisons
     * @param collation the collation specification
     * @return FindIterable with applied collation
     */
    FindIterable<TResult> collation(Collation collation);

    /**
     * Explains the query execution plan
     * @param verbosity the level of detail for explanation
     * @return query execution plan as Document
     */
    Document explain(ExplainVerbosity verbosity);
}

Usage Examples:

import com.mongodb.client.model.Filters;
import com.mongodb.client.model.Sorts;
import com.mongodb.client.model.Projections;

// Complex query with multiple conditions.
// Filters.and combines all predicates; skip(10) then limit(20) returns documents 11-30
// of the sorted result.
FindIterable<Document> results = collection.find()
    .filter(Filters.and(
        Filters.gte("age", 18),
        Filters.lt("age", 65),
        Filters.eq("status", "active"),
        Filters.in("department", Arrays.asList("sales", "marketing"))
    ))
    .sort(Sorts.orderBy(Sorts.ascending("lastName"), Sorts.descending("age")))
    .projection(Projections.include("firstName", "lastName", "email", "department"))
    .limit(20)
    .skip(10);

// Execute query — find() is lazy; iterating the FindIterable triggers the server round trip
for (Document doc : results) {
    System.out.println(doc.toJson());
}

// Text search with sorting by relevance.
// NOTE(review): $text queries require a text index on the collection — confirm one exists.
FindIterable<Document> textResults = collection.find()
    .filter(Filters.text("java mongodb driver"))
    .sort(Sorts.metaTextScore("score"))
    .projection(Projections.metaTextScore("score"));

// Explain query performance (EXECUTION_STATS includes per-stage timing and document counts)
Document explanation = collection.find()
    .filter(Filters.eq("category", "electronics"))
    .explain(ExplainVerbosity.EXECUTION_STATS);
System.out.println("Query plan: " + explanation.toJson());

AggregateIterable Interface

Aggregation pipeline interface for complex data processing and transformation operations.

/**
 * Interface for configuring and executing aggregation pipelines.
 * Setter methods return {@code this} for fluent chaining; the pipeline runs lazily
 * on iteration, or eagerly via {@link #toCollection()}.
 */
public interface AggregateIterable<TResult> extends MongoIterable<TResult> {
    /**
     * Enables or disables the use of disk for large aggregations
     * (for pipeline stages that would otherwise exceed the server's in-memory limit)
     * @param allowDiskUse true to allow disk usage
     * @return AggregateIterable with disk use option
     */
    AggregateIterable<TResult> allowDiskUse(Boolean allowDiskUse);
    
    /**
     * Sets the maximum execution time the server allows for the whole pipeline
     * @param maxTime the maximum time
     * @param timeUnit the time unit
     * @return AggregateIterable with time limit
     */
    AggregateIterable<TResult> maxTime(long maxTime, TimeUnit timeUnit);
    
    /**
     * Enables bypassing document validation for write stages ($out / $merge)
     * @param bypassDocumentValidation true to bypass validation
     * @return AggregateIterable with validation bypass
     */
    AggregateIterable<TResult> bypassDocumentValidation(Boolean bypassDocumentValidation);
    
    /**
     * Sets collation for string operations in the pipeline
     * @param collation the collation specification
     * @return AggregateIterable with applied collation
     */
    AggregateIterable<TResult> collation(Collation collation);
    
    /**
     * Provides a hint for which index to use
     * @param hint the index hint as Bson
     * @return AggregateIterable with index hint
     */
    AggregateIterable<TResult> hint(Bson hint);
    
    /**
     * Writes aggregation results to a collection (for $out or $merge stages);
     * executes the pipeline immediately and returns nothing to the client
     */
    void toCollection();
    
    /**
     * Explains the aggregation execution plan
     * @param verbosity the level of detail for explanation
     * @return aggregation execution plan as Document
     */
    Document explain(ExplainVerbosity verbosity);
}

Usage Examples:

import com.mongodb.client.model.Aggregates;
import com.mongodb.client.model.Accumulators;
import com.mongodb.client.model.Sorts;

// Sales analytics aggregation pipeline: recent orders -> join customers -> totals per region
List<Bson> pipeline = Arrays.asList(
    // Filter recent orders
    Aggregates.match(Filters.gte("orderDate", LocalDate.now().minusDays(30))),
    
    // Lookup customer information (left outer join: customerId -> customers._id)
    Aggregates.lookup("customers", "customerId", "_id", "customer"),
    
    // Unwind customer array (lookup produces an array; one matching customer expected)
    Aggregates.unwind("$customer"),
    
    // Group by customer and calculate totals (sum of literal 1 counts documents)
    Aggregates.group("$customer.region",
        Accumulators.sum("totalSales", "$amount"),
        Accumulators.avg("avgOrderValue", "$amount"),
        Accumulators.sum("orderCount", 1)
    ),
    
    // Sort by total sales descending
    Aggregates.sort(Sorts.descending("totalSales")),
    
    // Add computed fields
    Aggregates.addFields(new Field("salesPerOrder", 
        new Document("$divide", Arrays.asList("$totalSales", "$orderCount"))))
);

// Execute aggregation (lazy — iteration below triggers the server round trip)
AggregateIterable<Document> results = collection.aggregate(pipeline)
    .allowDiskUse(true)
    .maxTime(30, TimeUnit.SECONDS);

for (Document result : results) {
    System.out.println("Region: " + result.getString("_id"));
    // NOTE(review): getDouble assumes the server returned these as doubles; if "amount"
    // is stored as int/long the sums may come back as int32/int64 — confirm the schema.
    System.out.println("Total Sales: " + result.getDouble("totalSales"));
    System.out.println("Average Order: " + result.getDouble("avgOrderValue"));
}

// Complex aggregation with faceted search: three independent sub-pipelines ($facet)
// computed over the same matched set in a single server round trip.
List<Bson> facetPipeline = Arrays.asList(
    Aggregates.match(Filters.eq("status", "published")),
    Aggregates.facet(
        new Facet("categories", 
            Aggregates.group("$category", Accumulators.sum("count", 1)),
            Aggregates.sort(Sorts.descending("count"))
        ),
        new Facet("priceRanges",
            Aggregates.bucket("$price", 
                Arrays.asList(0, 50, 100, 500, 1000),
                new BucketOptions().defaultBucket("1000+")
                    .output(Accumulators.sum("count", 1))
            )
        ),
        new Facet("dateHistogram",
            Aggregates.group(
                new Document("$dateToString", 
                    new Document("format", "%Y-%m")
                        .append("date", "$createdDate")
                ),
                Accumulators.sum("count", 1)
            )
        )
    )
);

// first() returns null when the pipeline yields no documents — guard before toJson()
// to avoid a NullPointerException.
Document facetResults = collection.aggregate(facetPipeline).first();
if (facetResults != null) {
    System.out.println("Faceted results: " + facetResults.toJson());
} else {
    System.out.println("Faceted results: no documents matched");
}

DistinctIterable Interface

Interface for retrieving distinct values from a collection field.

/**
 * Interface for distinct value queries.
 * Yields each unique value of the target field exactly once; setters return
 * {@code this} for fluent chaining, and the query executes lazily on iteration.
 */
public interface DistinctIterable<TResult> extends MongoIterable<TResult> {
    /**
     * Sets the query filter to limit which documents are examined
     * (distinct values are collected only from matching documents)
     * @param filter the query filter as Bson
     * @return DistinctIterable with applied filter
     */
    DistinctIterable<TResult> filter(Bson filter);
    
    /**
     * Sets the maximum execution time
     * @param maxTime the maximum time
     * @param timeUnit the time unit
     * @return DistinctIterable with time limit
     */
    DistinctIterable<TResult> maxTime(long maxTime, TimeUnit timeUnit);
    
    /**
     * Sets collation for string comparisons
     * (affects which string values are considered equal, e.g. case-insensitive)
     * @param collation the collation specification
     * @return DistinctIterable with applied collation
     */
    DistinctIterable<TResult> collation(Collation collation);
}

Usage Examples:

// Get distinct categories — the second argument is the expected value type of the field
DistinctIterable<String> categories = collection.distinct("category", String.class);
for (String category : categories) {
    System.out.println("Category: " + category);
}

// Get distinct values with filter (only documents with status "active" are examined)
DistinctIterable<String> activeUserCountries = collection
    .distinct("country", String.class)
    .filter(Filters.eq("status", "active"));

// into() drains the iterable into the supplied collection and returns it
List<String> countries = new ArrayList<>();
activeUserCountries.into(countries);
System.out.println("Active user countries: " + countries);

// Get distinct with complex filter (both conditions must hold for a document to count)
DistinctIterable<Integer> distinctAges = collection
    .distinct("age", Integer.class)
    .filter(Filters.and(
        Filters.gte("registrationDate", LocalDate.now().minusYears(1)),
        Filters.eq("verified", true)
    ));

MapReduceIterable Interface

Interface for MapReduce operations (legacy approach, aggregation preferred).

/**
 * Interface for MapReduce operations.
 * NOTE(review): map-reduce is a legacy mechanism — as the surrounding docs state,
 * prefer the aggregation pipeline for new code. Setters return {@code this} for
 * fluent chaining; results stream lazily on iteration or are written out via
 * {@link #toCollection()}.
 */
public interface MapReduceIterable<TResult> extends MongoIterable<TResult> {
    /**
     * Sets the name of the collection to output results to
     * @param collectionName the output collection name
     * @return MapReduceIterable with output collection
     */
    MapReduceIterable<TResult> collectionName(String collectionName);
    
    /**
     * Sets the finalize function for post-processing each reduced value
     * @param finalizeFunction JavaScript function as string
     * @return MapReduceIterable with finalize function
     */
    MapReduceIterable<TResult> finalizeFunction(String finalizeFunction);
    
    /**
     * Sets the scope variables available to map, reduce, and finalize functions
     * (exposed as global variables inside the JavaScript functions)
     * @param scope the scope variables as Bson
     * @return MapReduceIterable with scope variables
     */
    MapReduceIterable<TResult> scope(Bson scope);
    
    /**
     * Sets the sort criteria to apply before mapping
     * @param sort the sort specification as Bson
     * @return MapReduceIterable with applied sort
     */
    MapReduceIterable<TResult> sort(Bson sort);
    
    /**
     * Sets the query filter to limit input documents
     * @param filter the query filter as Bson
     * @return MapReduceIterable with applied filter
     */
    MapReduceIterable<TResult> filter(Bson filter);
    
    /**
     * Sets the maximum number of documents to process
     * @param limit the maximum number of documents
     * @return MapReduceIterable with applied limit
     */
    MapReduceIterable<TResult> limit(int limit);
    
    /**
     * Sets the MapReduce action (output behavior, e.g. replace/merge/reduce
     * into the target collection)
     * @param action the MapReduce action
     * @return MapReduceIterable with specified action
     */
    MapReduceIterable<TResult> action(MapReduceAction action);
    
    /**
     * Writes results to a collection; executes the operation immediately
     */
    void toCollection();
}

Usage Examples:

// MapReduce for counting documents by category (better done with aggregation)
// map emits (category, 1) per document; reduce sums the 1s per category
String mapFunction = "function() { emit(this.category, 1); }";
String reduceFunction = "function(key, values) { return Array.sum(values); }";

MapReduceIterable<Document> mapReduceResults = collection
    .mapReduce(mapFunction, reduceFunction)
    .filter(Filters.eq("status", "active"));

for (Document result : mapReduceResults) {
    // NOTE(review): JavaScript numbers are typically returned as doubles, so
    // getInteger("value") may fail with a ClassCastException — confirm the
    // returned BSON type; getDouble("value") may be the safer accessor.
    System.out.println("Category: " + result.getString("_id") + 
                      ", Count: " + result.getInteger("value"));
}

// Note: Modern applications should use aggregation instead:
List<Bson> equivalentAggregation = Arrays.asList(
    Aggregates.match(Filters.eq("status", "active")),
    Aggregates.group("$category", Accumulators.sum("count", 1))
);

Query Optimization

Advanced querying techniques for performance optimization.

// Index hints for query optimization — forces the server to use the named
// compound index instead of letting the planner choose
FindIterable<Document> optimizedQuery = collection.find()
    .filter(Filters.and(
        Filters.gte("timestamp", startDate),
        Filters.lt("timestamp", endDate),
        Filters.eq("userId", userId)
    ))
    .hint(new Document("userId", 1).append("timestamp", 1))
    .sort(Sorts.descending("timestamp"));

// Partial results for large collections
// (partial(true) only has an effect against sharded clusters)
FindIterable<Document> partialResults = collection.find()
    .filter(complexFilter)
    .partial(true)  // Return partial results if some shards are down
    .maxTime(5, TimeUnit.SECONDS);

// Cursor configuration for different use cases
// NOTE(review): tailable cursors are for capped collections — confirm the target
// collection is capped before using TailableAwait.
FindIterable<Document> tailableCursor = collection.find()
    .cursorType(CursorType.TailableAwait)
    .maxAwaitTime(1, TimeUnit.SECONDS)
    .noCursorTimeout(true);

Query Result Processing

Different approaches for processing query results efficiently.

// Iterator-based processing — try-with-resources guarantees the server-side
// cursor is closed even if processing throws
try (MongoCursor<Document> cursor = collection.find().iterator()) {
    while (cursor.hasNext()) {
        Document doc = cursor.next();
        // Process document
    }
}

// Stream-like processing with forEach
// NOTE(review): this updates documents matched by the same query while iterating it;
// without snapshot semantics an updated document could in principle be revisited —
// confirm this is acceptable for the workload.
collection.find()
    .filter(Filters.eq("processed", false))
    .forEach(document -> {
        // Process each document
        processDocument(document);
        
        // Mark as processed
        collection.updateOne(
            Filters.eq("_id", document.getObjectId("_id")),
            Updates.set("processed", true)
        );
    });

// Collect results into collections — into() drains the cursor into the given list
List<Document> results = new ArrayList<>();
collection.find()
    .filter(Filters.eq("category", "important"))
    .into(results);

// Transform results — map() applies the function lazily as the cursor is drained
List<String> names = collection.find()
    .projection(Projections.include("name"))
    .map(doc -> doc.getString("name"))
    .into(new ArrayList<>());

Install with Tessl CLI

npx tessl i tessl/maven-org-mongodb--mongodb-driver-sync

docs

change-streams.md

collection-crud.md

connection-management.md

database-operations.md

encryption.md

gridfs.md

index-management.md

index.md

query-aggregation.md

sessions-transactions.md

tile.json