or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

calendar-management.mdcore-scheduling.mdenterprise-features.mdexception-handling.mdindex.mdjob-management.mdlisteners-events.mdmatcher-framework.mdpersistence-storage.mdschedule-builders.mdtrigger-management.mdutilities-helpers.md

persistence-storage.mddocs/

0

# Persistence and Storage

1

2

Comprehensive data persistence system in Quartz supporting both in-memory and database storage with sophisticated connection pooling, transaction management, and database-specific optimizations. This system provides the foundation for job and trigger persistence, clustering, and enterprise-scale deployments.

3

4

## Capabilities

5

6

### JobStore Interface

7

8

Base interface for all job and trigger persistence implementations.

9

10

```java { .api }

11

/**

12

* Interface for storing and retrieving jobs and triggers

13

*/

14

interface JobStore {

15

/**

16

* Initialize the job store

17

* @param loadHelper class loading helper

18

* @param signaler scheduler signaler for notifications

19

* @throws SchedulerConfigException if initialization fails

20

*/

21

void initialize(ClassLoadHelper loadHelper, SchedulerSignaler signaler) throws SchedulerConfigException;

22

23

/**

24

* Start the job store for use

25

* @throws SchedulerException if startup fails

26

*/

27

void schedulerStarted() throws SchedulerException;

28

29

/**

30

* Shutdown the job store

31

*/

32

void shutdown();

33

34

/**

35

* Store a job and its associated triggers

36

* @param newJob the job to store

37

* @param newTrigger the trigger to associate with the job

38

* @throws ObjectAlreadyExistsException if the job or trigger already exists

39

* @throws SchedulerException if storage fails

40

*/

41

void storeJobAndTrigger(JobDetail newJob, OperableTrigger newTrigger) throws SchedulerException;

42

43

/**

44

* Store a job

45

* @param newJob the job to store

46

* @param replaceExisting whether to replace existing job

47

* @throws SchedulerException if storage fails

48

*/

49

void storeJob(JobDetail newJob, boolean replaceExisting) throws SchedulerException;

50

51

/**

52

* Store a trigger

53

* @param newTrigger the trigger to store

54

* @param replaceExisting whether to replace existing trigger

55

* @throws SchedulerException if storage fails

56

*/

57

void storeTrigger(OperableTrigger newTrigger, boolean replaceExisting) throws SchedulerException;

58

59

/**

60

* Remove a job and all its triggers

61

* @param jobKey the job key

62

* @return true if job was removed

63

* @throws SchedulerException if removal fails

64

*/

65

boolean removeJob(JobKey jobKey) throws SchedulerException;

66

67

/**

68

* Remove a trigger

69

* @param triggerKey the trigger key

70

* @return true if trigger was removed

71

* @throws SchedulerException if removal fails

72

*/

73

boolean removeTrigger(TriggerKey triggerKey) throws SchedulerException;

74

75

/**

76

* Retrieve a job

77

* @param jobKey the job key

78

* @return the job detail or null if not found

79

* @throws SchedulerException if retrieval fails

80

*/

81

JobDetail retrieveJob(JobKey jobKey) throws SchedulerException;

82

83

/**

84

* Retrieve a trigger

85

* @param triggerKey the trigger key

86

* @return the trigger or null if not found

87

* @throws SchedulerException if retrieval fails

88

*/

89

OperableTrigger retrieveTrigger(TriggerKey triggerKey) throws SchedulerException;

90

91

/**

92

* Acquire triggers ready to fire

93

* @param noLaterThan maximum fire time

94

* @param maxCount maximum number of triggers

95

* @param timeWindow time window for acquisition

96

* @return list of acquired triggers ready to fire

97

* @throws SchedulerException if acquisition fails

98

*/

99

List<OperableTrigger> acquireNextTriggers(long noLaterThan, int maxCount, long timeWindow) throws SchedulerException;

100

101

/**

102

* Inform job store that triggers have been fired

103

* @param triggers the fired triggers

104

* @return firing results

105

* @throws SchedulerException if operation fails

106

*/

107

List<TriggerFiredResult> triggersFired(List<OperableTrigger> triggers) throws SchedulerException;

108

109

/**

110

* Check if job store supports persistence

111

* @return true if persistent

112

*/

113

boolean supportsPersistence();

114

115

/**

116

* Check if job store is clustered

117

* @return true if clustered

118

*/

119

boolean isClustered();

120

}

121

```

122

123

### RAMJobStore Class

124

125

In-memory job store implementation for single-node, volatile storage.

126

127

```java { .api }

128

/**

129

* In-memory job store - data is lost when JVM stops

130

*/

131

class RAMJobStore implements JobStore {

132

/**

133

* Create in-memory job store

134

*/

135

RAMJobStore();

136

137

/**

138

* Set misfire threshold in milliseconds

139

* @param misfireThreshold threshold in milliseconds

140

*/

141

void setMisfireThreshold(long misfireThreshold);

142

143

/**

144

* Get misfire threshold

145

* @return misfire threshold in milliseconds

146

*/

147

long getMisfireThreshold();

148

149

// Inherits all JobStore methods

150

// Data stored in memory - not persistent across restarts

151

// Fast performance, no database dependencies

152

// Single scheduler instance only

153

}

154

```

155

156

### JDBC JobStore Classes

157

158

Database-backed job stores for persistent, clustered storage.

159

160

```java { .api }

161

/**

162

* JDBC job store with transaction management

163

*/

164

class JobStoreTX extends JobStoreSupport {

165

/**

166

* Create JDBC job store with transaction management

167

*/

168

JobStoreTX();

169

170

// Manages its own database transactions

171

// Suitable for standalone applications

172

// Uses JobStoreSupport base functionality

173

}

174

175

/**

176

* JDBC job store for container-managed transactions

177

*/

178

class JobStoreCMT extends JobStoreSupport {

179

/**

180

* Create JDBC job store for container-managed transactions

181

*/

182

JobStoreCMT();

183

184

/**

185

* Set whether to use non-managed transactions for locks

186

* @param nonManagedTXForLocks true to use non-managed transactions

187

*/

188

void setNonManagedTXForLocks(boolean nonManagedTXForLocks);

189

190

// Works with container-managed transactions (CMT)

191

// Suitable for J2EE application servers

192

// Transaction boundaries managed by container

193

}

194

195

/**

196

* Base class for JDBC job store implementations

197

*/

198

abstract class JobStoreSupport implements JobStore {

199

/**

200

* Set the database table prefix

201

* @param tablePrefix prefix for all Quartz tables

202

*/

203

void setTablePrefix(String tablePrefix);

204

205

/**

206

* Set the data source name

207

* @param dataSource JNDI name or direct data source name

208

*/

209

void setDataSource(String dataSource);

210

211

/**

212

* Set the driver delegate class for database-specific SQL

213

* @param driverDelegateClass fully qualified class name

214

*/

215

void setDriverDelegateClass(String driverDelegateClass);

216

217

/**

218

* Set whether job store is clustered

219

* @param isClustered true for clustered operation

220

*/

221

void setIsClustered(boolean isClustered);

222

223

/**

224

* Set cluster check-in interval

225

* @param clusterCheckinInterval interval in milliseconds

226

*/

227

void setClusterCheckinInterval(long clusterCheckinInterval);

228

229

/**

230

* Set maximum misfire threshold

231

* @param misfireThreshold threshold in milliseconds

232

*/

233

void setMisfireThreshold(long misfireThreshold);

234

235

/**

236

* Set whether to use database locks

237

* @param useDBLocks true to use database locks

238

*/

239

void setUseDBLocks(boolean useDBLocks);

240

241

/**

242

* Set lock handler for database locking

243

* @param lockHandler lock handler implementation

244

*/

245

void setLockHandler(Semaphore lockHandler);

246

247

/**

248

* Set whether to acquire triggers within lock

249

* @param acquireTriggersWithinLock true to use lock

250

*/

251

void setAcquireTriggersWithinLock(boolean acquireTriggersWithinLock);

252

}

253

```

254

255

### Database Driver Delegates

256

257

Database-specific SQL implementations for optimal performance.

258

259

```java { .api }

260

/**

261

* Base delegate providing standard SQL operations

262

*/

263

class StdJDBCDelegate implements DriverDelegate {

264

/**

265

* Get the database-specific SQL for operations

266

*/

267

String getSelectNextTriggerToAcquireSQL();

268

String getInsertJobDetailSQL();

269

String getUpdateJobDetailSQL();

270

String getSelectJobDetailSQL();

271

String getDeleteJobDetailSQL();

272

273

// Additional SQL operation methods

274

}

275

276

/**

277

* PostgreSQL-specific optimizations

278

*/

279

class PostgreSQLDelegate extends StdJDBCDelegate {

280

// PostgreSQL-specific SQL optimizations

281

// Better handling of BYTEA columns

282

// PostgreSQL-specific lock handling

283

}

284

285

/**

286

* Oracle-specific optimizations

287

*/

288

class OracleDelegate extends StdJDBCDelegate {

289

// Oracle-specific SQL optimizations

290

// BLOB/CLOB handling improvements

291

// Oracle-specific rowid operations

292

}

293

294

/**

295

* MySQL-specific optimizations

296

*/

297

class MySQLDelegate extends StdJDBCDelegate {

298

// MySQL-specific SQL optimizations

299

// MySQL-specific data type handling

300

}

301

302

/**

303

* SQL Server-specific optimizations

304

*/

305

class MSSQLDelegate extends StdJDBCDelegate {

306

// SQL Server-specific SQL optimizations

307

// SQL Server-specific lock handling

308

}

309

310

/**

311

* DB2-specific optimizations

312

*/

313

class DB2v6Delegate extends StdJDBCDelegate {

314

// DB2-specific SQL optimizations

315

}

316

317

/**

318

* DB2 version 7+ optimizations

319

*/

320

class DB2v7Delegate extends DB2v6Delegate {

321

// Enhanced DB2 features for version 7+

322

}

323

324

/**

325

* CloudscapeDelegate for Apache Derby

326

*/

327

class CloudscapeDelegate extends StdJDBCDelegate {

328

// Apache Derby / Cloudscape optimizations

329

}

330

331

/**

332

* H2 database optimizations

333

*/

334

class H2Delegate extends StdJDBCDelegate {

335

// H2-specific optimizations

336

}

337

338

/**

339

* HSQLDB optimizations

340

*/

341

class HSQLDBDelegate extends StdJDBCDelegate {

342

// HSQLDB-specific optimizations

343

}

344

```

345

346

### Connection Pooling

347

348

Sophisticated connection pool management for database operations.

349

350

```java { .api }

351

/**

352

* Interface for providing database connections

353

*/

354

interface ConnectionProvider {

355

/**

356

* Get a database connection

357

* @return database connection

358

* @throws SQLException if connection cannot be obtained

359

*/

360

Connection getConnection() throws SQLException;

361

362

/**

363

* Shutdown the connection provider

364

* @throws SQLException if shutdown fails

365

*/

366

void shutdown() throws SQLException;

367

368

/**

369

* Initialize the connection provider

370

* @throws SQLException if initialization fails

371

*/

372

void initialize() throws SQLException;

373

}

374

375

/**

376

* Interface for pooling connection providers

377

*/

378

interface PoolingConnectionProvider extends ConnectionProvider {

379

/**

380

* Get the current number of connections in the pool

381

* @return connection count

382

*/

383

int getPoolSize();

384

385

/**

386

* Get the maximum pool size

387

* @return maximum connections

388

*/

389

int getMaxPoolSize();

390

391

/**

392

* Get the current number of connections in use

393

* @return connections in use

394

*/

395

int getConnectionsInUse();

396

}

397

398

/**

399

* C3P0 connection pooling implementation

400

*/

401

class C3p0PoolingConnectionProvider implements PoolingConnectionProvider {

402

/**

403

* Set maximum pool size

404

* @param maxPoolSize maximum number of connections

405

*/

406

void setMaxPoolSize(int maxPoolSize);

407

408

/**

409

* Set minimum pool size

410

* @param minPoolSize minimum number of connections

411

*/

412

void setMinPoolSize(int minPoolSize);

413

414

/**

415

* Set initial pool size

416

* @param initialPoolSize initial number of connections

417

*/

418

void setInitialPoolSize(int initialPoolSize);

419

420

/**

421

* Set maximum idle time for connections

422

* @param maxIdleTime idle time in seconds

423

*/

424

void setMaxIdleTime(int maxIdleTime);

425

426

/**

427

* Set idle connection test period

428

* @param idleConnectionTestPeriod test period in seconds

429

*/

430

void setIdleConnectionTestPeriod(int idleConnectionTestPeriod);

431

432

/**

433

* Set connection validation query

434

* @param validationQuery SQL query for validation

435

*/

436

void setValidationQuery(String validationQuery);

437

}

438

439

/**

440

* HikariCP connection pooling implementation

441

*/

442

class HikariCpPoolingConnectionProvider implements PoolingConnectionProvider {

443

/**

444

* Set maximum pool size

445

* @param maximumPoolSize maximum number of connections

446

*/

447

void setMaximumPoolSize(int maximumPoolSize);

448

449

/**

450

* Set minimum idle connections

451

* @param minimumIdle minimum idle connections

452

*/

453

void setMinimumIdle(int minimumIdle);

454

455

/**

456

* Set maximum connection lifetime

457

* @param maxLifetime lifetime in milliseconds

458

*/

459

void setMaxLifetime(long maxLifetime);

460

461

/**

462

* Set connection timeout

463

* @param connectionTimeout timeout in milliseconds

464

*/

465

void setConnectionTimeout(long connectionTimeout);

466

467

/**

468

* Set idle timeout

469

* @param idleTimeout timeout in milliseconds

470

*/

471

void setIdleTimeout(long idleTimeout);

472

473

/**

474

* Set connection test query

475

* @param connectionTestQuery SQL query for testing

476

*/

477

void setConnectionTestQuery(String connectionTestQuery);

478

}

479

480

/**

481

* JNDI-based connection provider

482

*/

483

class JNDIConnectionProvider implements ConnectionProvider {

484

/**

485

* Set JNDI name for data source lookup

486

* @param jndiURL JNDI URL for data source

487

*/

488

void setJndiURL(String jndiURL);

489

490

/**

491

* Set initial context factory

492

* @param java_naming_factory_initial factory class name

493

*/

494

void setJava_naming_factory_initial(String java_naming_factory_initial);

495

496

/**

497

* Set naming provider URL

498

* @param java_naming_provider_url provider URL

499

*/

500

void setJava_naming_provider_url(String java_naming_provider_url);

501

}

502

503

/**

504

* Connection manager for managing multiple data sources

505

*/

506

class DBConnectionManager {

507

/**

508

* Get singleton instance

509

* @return connection manager instance

510

*/

511

static DBConnectionManager getInstance();

512

513

/**

514

* Add connection provider

515

* @param dataSourceName name of data source

516

* @param provider connection provider implementation

517

*/

518

void addConnectionProvider(String dataSourceName, ConnectionProvider provider);

519

520

/**

521

* Get connection from named data source

522

* @param dataSourceName name of data source

523

* @return database connection

524

* @throws SQLException if connection cannot be obtained

525

*/

526

Connection getConnection(String dataSourceName) throws SQLException;

527

528

/**

529

* Shutdown all connection providers

530

*/

531

void shutdown();

532

}

533

```

534

535

**Usage Examples:**

536

537

```java

538

// Basic JDBC JobStore Configuration (quartz.properties)

539

/*

540

# Use JDBC job store with transaction management

541

org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX

542

543

# Database-specific delegate

544

org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate

545

546

# Data source configuration

547

org.quartz.jobStore.dataSource = myDS

548

549

# Table prefix (default is QRTZ_)

550

org.quartz.jobStore.tablePrefix = QRTZ_

551

552

# Clustering settings

553

org.quartz.jobStore.isClustered = true

554

org.quartz.jobStore.clusterCheckinInterval = 20000

555

556

# Misfire handling

557

org.quartz.jobStore.misfireThreshold = 60000

558

559

# Data source definition

560

org.quartz.dataSource.myDS.driver = org.postgresql.Driver

561

org.quartz.dataSource.myDS.URL = jdbc:postgresql://localhost:5432/quartz

562

org.quartz.dataSource.myDS.user = quartz

563

org.quartz.dataSource.myDS.password = password

564

org.quartz.dataSource.myDS.maxConnections = 10

565

org.quartz.dataSource.myDS.connectionProvider.class = org.quartz.utils.HikariCpPoolingConnectionProvider

566

*/

567

568

// HikariCP Connection Pooling Configuration

569

/*

570

# Use HikariCP for connection pooling

571

org.quartz.dataSource.myDS.connectionProvider.class = org.quartz.utils.HikariCpPoolingConnectionProvider

572

org.quartz.dataSource.myDS.maximumPoolSize = 20

573

org.quartz.dataSource.myDS.minimumIdle = 5

574

org.quartz.dataSource.myDS.maxLifetime = 1800000

575

org.quartz.dataSource.myDS.connectionTimeout = 30000

576

org.quartz.dataSource.myDS.idleTimeout = 600000

577

org.quartz.dataSource.myDS.connectionTestQuery = SELECT 1

578

*/

579

580

// C3P0 Connection Pooling Configuration

581

/*

582

# Use C3P0 for connection pooling

583

org.quartz.dataSource.myDS.connectionProvider.class = org.quartz.utils.C3p0PoolingConnectionProvider

584

org.quartz.dataSource.myDS.maxPoolSize = 20

585

org.quartz.dataSource.myDS.minPoolSize = 5

586

org.quartz.dataSource.myDS.initialPoolSize = 10

587

org.quartz.dataSource.myDS.maxIdleTime = 300

588

org.quartz.dataSource.myDS.idleConnectionTestPeriod = 3000

589

org.quartz.dataSource.myDS.validationQuery = SELECT 1

590

*/

591

592

// JNDI Data Source Configuration

593

/*

594

# Use JNDI data source

595

org.quartz.jobStore.dataSource = myJndiDS

596

org.quartz.dataSource.myJndiDS.connectionProvider.class = org.quartz.utils.JNDIConnectionProvider

597

org.quartz.dataSource.myJndiDS.jndiURL = java:comp/env/jdbc/QuartzDS

598

*/

599

600

// Programmatic Configuration

601

Properties props = new Properties();

602

props.setProperty("org.quartz.jobStore.class", "org.quartz.impl.jdbcjobstore.JobStoreTX");

603

props.setProperty("org.quartz.jobStore.driverDelegateClass", "org.quartz.impl.jdbcjobstore.PostgreSQLDelegate");

604

props.setProperty("org.quartz.jobStore.dataSource", "myDS");

605

props.setProperty("org.quartz.jobStore.tablePrefix", "QRTZ_");

606

props.setProperty("org.quartz.jobStore.isClustered", "true");

607

608

// Data source properties

609

props.setProperty("org.quartz.dataSource.myDS.driver", "org.postgresql.Driver");

610

props.setProperty("org.quartz.dataSource.myDS.URL", "jdbc:postgresql://localhost:5432/quartz");

611

props.setProperty("org.quartz.dataSource.myDS.user", "quartz");

612

props.setProperty("org.quartz.dataSource.myDS.password", "password");

613

props.setProperty("org.quartz.dataSource.myDS.maxConnections", "15");

614

props.setProperty("org.quartz.dataSource.myDS.connectionProvider.class",

615

"org.quartz.utils.HikariCpPoolingConnectionProvider");

616

617

SchedulerFactory factory = new StdSchedulerFactory(props);

618

Scheduler scheduler = factory.getScheduler();

619

620

// Custom Connection Provider

621

public class CustomConnectionProvider implements PoolingConnectionProvider {

622

private HikariDataSource dataSource;

623

624

@Override

625

public void initialize() throws SQLException {

626

HikariConfig config = new HikariConfig();

627

config.setJdbcUrl("jdbc:postgresql://localhost:5432/quartz");

628

config.setUsername("quartz");

629

config.setPassword("password");

630

config.setMaximumPoolSize(20);

631

config.setMinimumIdle(5);

632

config.setConnectionTestQuery("SELECT 1");

633

634

dataSource = new HikariDataSource(config);

635

}

636

637

@Override

638

public Connection getConnection() throws SQLException {

639

return dataSource.getConnection();

640

}

641

642

@Override

643

public void shutdown() throws SQLException {

644

if (dataSource != null) {

645

dataSource.close();

646

}

647

}

648

649

@Override

650

public int getPoolSize() {

651

return dataSource.getHikariPoolMXBean().getTotalConnections();

652

}

653

654

@Override

655

public int getMaxPoolSize() {

656

return dataSource.getMaximumPoolSize();

657

}

658

659

@Override

660

public int getConnectionsInUse() {

661

return dataSource.getHikariPoolMXBean().getActiveConnections();

662

}

663

}

664

665

// Database Schema Creation

666

// Quartz provides SQL scripts for creating required tables:

667

// - tables_postgres.sql

668

// - tables_mysql.sql

669

// - tables_oracle.sql

670

// - tables_sqlserver.sql

671

// - tables_db2.sql

672

// - tables_h2.sql

673

// etc.

674

675

/*

676

Example PostgreSQL schema (simplified):

677

CREATE TABLE qrtz_job_details (

678

sched_name VARCHAR(120) NOT NULL,

679

job_name VARCHAR(200) NOT NULL,

680

job_group VARCHAR(200) NOT NULL,

681

description VARCHAR(250) NULL,

682

job_class_name VARCHAR(250) NOT NULL,

683

is_durable BOOL NOT NULL,

684

is_nonconcurrent BOOL NOT NULL,

685

is_update_data BOOL NOT NULL,

686

requests_recovery BOOL NOT NULL,

687

job_data BYTEA NULL,

688

PRIMARY KEY (sched_name, job_name, job_group)

689

);

690

691

CREATE TABLE qrtz_triggers (

692

sched_name VARCHAR(120) NOT NULL,

693

trigger_name VARCHAR(200) NOT NULL,

694

trigger_group VARCHAR(200) NOT NULL,

695

job_name VARCHAR(200) NOT NULL,

696

job_group VARCHAR(200) NOT NULL,

697

description VARCHAR(250) NULL,

698

next_fire_time BIGINT NULL,

699

prev_fire_time BIGINT NULL,

700

priority INTEGER NULL,

701

trigger_state VARCHAR(16) NOT NULL,

702

trigger_type VARCHAR(8) NOT NULL,

703

start_time BIGINT NOT NULL,

704

end_time BIGINT NULL,

705

calendar_name VARCHAR(200) NULL,

706

misfire_instr SMALLINT NULL,

707

job_data BYTEA NULL,

708

PRIMARY KEY (sched_name, trigger_name, trigger_group)

709

);

710

*/

711

712

// Monitoring Connection Pools

713

public void monitorConnectionPool(PoolingConnectionProvider provider) {

714

System.out.println("Pool Size: " + provider.getPoolSize());

715

System.out.println("Max Pool Size: " + provider.getMaxPoolSize());

716

System.out.println("Connections In Use: " + provider.getConnectionsInUse());

717

System.out.println("Available Connections: " +

718

(provider.getPoolSize() - provider.getConnectionsInUse()));

719

}

720

```

721

722

## Database Support Matrix

723

724

| Database | Delegate Class | Clustering | Notes |

725

|----------|---------------|------------|-------|

726

| PostgreSQL | PostgreSQLDelegate | Yes | Recommended for production |

727

| MySQL | MySQLDelegate | Yes | Full feature support |

728

| Oracle | OracleDelegate | Yes | Enterprise features |

729

| SQL Server | MSSQLDelegate | Yes | Full support |

730

| DB2 | DB2v7Delegate | Yes | Version 7+ recommended |

731

| H2 | H2Delegate | Limited | Development/testing |

732

| HSQLDB | HSQLDBDelegate | No | Development only |

733

| Apache Derby | CloudscapeDelegate | Limited | Embedded use |

734

735

## Configuration Best Practices

736

737

1. **Connection Pooling**: Always use connection pooling in production

738

2. **Database Selection**: Use PostgreSQL, MySQL, Oracle, or SQL Server for production

739

3. **Clustering**: Enable clustering for high availability

740

4. **Table Prefix**: Use unique table prefixes for multiple Quartz instances

741

5. **Monitoring**: Monitor connection pool usage and database performance

742

6. **Backup**: Regular backups of job store data for disaster recovery