Apache Ranger Audit Plugin Framework providing centralized audit logging capabilities for Apache Ranger security plugins across various big data components.
Install via:

    npx @tessl/cli install tessl/maven-org-apache-ranger--ranger-plugins-audit@2.7.0

Apache Ranger Audit Plugin Framework provides centralized audit logging capabilities for Apache Ranger security plugins across various big data components (HDFS, Hive, HBase, Knox, etc.). It implements a configurable audit queue system with support for batching, buffering, and multiple destination types including HDFS, Solr, Elasticsearch, Kafka, CloudWatch, and Log4j.
Maven coordinates:

    <groupId>org.apache.ranger</groupId>
    <artifactId>ranger-plugins-audit</artifactId>
    <version>2.7.0</version>

Usage example:

import org.apache.ranger.audit.provider.AuditProviderFactory;
import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.model.AuthzAuditEvent;
import org.apache.ranger.audit.model.EnumRepositoryType;
import java.util.Date;
import java.util.Properties;
// Initialize audit framework
Properties auditProps = new Properties();
auditProps.setProperty("xasecure.audit.is.enabled", "true");
auditProps.setProperty("xasecure.audit.hdfs.is.enabled", "true");
auditProps.setProperty("xasecure.audit.hdfs.destination.directory", "/ranger/audit");
AuditProviderFactory factory = AuditProviderFactory.getInstance();
factory.init(auditProps, "myapp");
// Get audit provider
AuditHandler auditProvider = factory.getAuditProvider();
// Create and log audit event
AuthzAuditEvent auditEvent = new AuthzAuditEvent();
auditEvent.setRepositoryName("myservice");
auditEvent.setRepositoryType(EnumRepositoryType.HDFS);
auditEvent.setUser("john.doe");
auditEvent.setAccessType("read");
auditEvent.setResourceType("path");
auditEvent.setResourcePath("/data/sensitive");
auditEvent.setAccessResult(1); // ALLOWED
auditEvent.setEventTime(new Date());
// Log the audit event
auditProvider.log(auditEvent);
// Shutdown
factory.shutdown();

The framework is organized as a multi-module Maven project with a layered architecture:
This design enables reliable audit trails across the entire Hadoop ecosystem through standardized audit event processing with configurable reliability, performance, and destination options.
Primary audit framework components including factories, handlers, and event models that form the foundation of the audit system.
// Main factory for audit providers
public class AuditProviderFactory {
public static AuditProviderFactory getInstance();
public void init(Properties props, String appType);
public AuditHandler getAuditProvider();
public void shutdown();
}
// Primary interface for audit handlers
public interface AuditHandler {
public boolean log(AuditEventBase event);
public boolean log(Collection<AuditEventBase> events);
public boolean logJSON(String event);
public boolean logJSON(Collection<String> events);
public boolean logFile(File file);
public void init(Properties props);
public void init(Properties props, String basePropertyName);
public void start();
public void stop();
public void waitToComplete();
public void waitToComplete(long timeout);
public String getName();
public void flush();
}
// Primary audit event model
public class AuthzAuditEvent extends AuditEventBase {
// Comprehensive getters/setters for audit event data
}

Pluggable audit destination implementations for sending audit events to various storage and messaging systems.
// Base class for audit destinations
public abstract class AuditDestination extends BaseAuditHandler {
public abstract void init(Properties props, String basePropertyName);
public abstract void start();
public abstract void stop();
public abstract void flush();
}
// HDFS audit destination
public class HDFSAuditDestination extends AuditDestination {
public void init(Properties props, String basePropertyName);
public void logJSON(Collection<String> events);
public void logFile(File file);
}
// Solr audit destination
public class SolrAuditDestination extends AuditDestination {
public void init(Properties props, String basePropertyName);
public void log(Collection<AuditEventBase> events);
}

Asynchronous audit processing with configurable queues, batching, and file spooling for reliability.
// Asynchronous audit provider
public class AsyncAuditProvider extends BaseAuditHandler {
public AsyncAuditProvider(String name, int maxQueueSize, int maxBatchInterval);
public void init(Properties props);
public void log(AuditEventBase event);
public void start();
public void stop();
public void waitToComplete();
}
// Base class for audit queues
public abstract class AuditQueue extends AuditDestination {
// Queue configuration and drain management methods
}
// Multi-destination audit provider
public class MultiDestAuditProvider extends BaseAuditHandler {
public void addAuditProvider(AuditHandler provider);
public void addAuditProviders(List<AuditHandler> providers);
}

Audit file writers supporting multiple output formats including JSON and ORC.
// Interface for audit file writers
public interface RangerAuditWriter {
public void init(Properties props, String basePropertyName, String auditProviderName, Map<String,String> auditConfigs);
public void log(Collection<String> events);
public void logFile(File file);
public void start();
public void stop();
public void flush();
}
// JSON audit writer for HDFS
public class RangerJSONAuditWriter extends AbstractRangerAuditWriter {
public void init(Properties props, String basePropertyName, String auditProviderName, Map<String,String> auditConfigs);
public void log(Collection<String> events);
}
// ORC audit writer
public class RangerORCAuditWriter extends AbstractRangerAuditWriter {
public void init(Properties props, String basePropertyName, String auditProviderName, Map<String,String> auditConfigs);
public void log(Collection<String> events);
}

// Base abstract class for all audit events
public abstract class AuditEventBase {
public abstract String getEventKey();
public Date getEventTime();
public void setEventCount(long eventCount);
public void setEventDurationMS(long eventDurationMS);
}
// Repository type constants
public final class EnumRepositoryType {
public static final int HDFS = 1;
public static final int HBASE = 2;
public static final int HIVE = 3;
public static final int XAAGENT = 4;
public static final int KNOX = 5;
public static final int STORM = 6;
}
// Spool file status enumeration
public enum SPOOL_FILE_STATUS {
pending, write_inprogress, read_inprogress, done
}
// Audit exception type
public class AuditMessageException extends Exception {
public AuditMessageException(String message);
public AuditMessageException(String message, Throwable cause);
}