 * Database-backed job queue code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
22 use Wikimedia\Rdbms\IDatabase
;
23 use Wikimedia\Rdbms\DBConnRef
;
24 use Wikimedia\Rdbms\DBConnectionError
;
25 use Wikimedia\Rdbms\DBError
;
26 use MediaWiki\MediaWikiServices
;
27 use Wikimedia\ScopedCallback
;
30 * Class to handle job queues stored in the DB
35 class JobQueueDB
extends JobQueue
{
36 const CACHE_TTL_SHORT
= 30; // integer; seconds to cache info without re-validating
37 const MAX_AGE_PRUNE
= 604800; // integer; seconds a job can live once claimed
38 const MAX_JOB_RANDOM
= 2147483647; // integer; 2^31 - 1, used for job_random
39 const MAX_OFFSET
= 255; // integer; maximum number of rows to skip
41 /** @var WANObjectCache */
44 /** @var bool|string Name of an external DB cluster. False if not set */
45 protected $cluster = false;
48 * Additional parameters include:
49 * - cluster : The name of an external cluster registered via LBFactory.
50 * If not specified, the primary DB cluster for the wiki will be used.
51 * This can be overridden with a custom cluster so that DB handles will
52 * be retrieved via LBFactory::getExternalLB() and getConnection().
53 * @param array $params
55 protected function __construct( array $params ) {
56 parent
::__construct( $params );
58 $this->cluster
= $params['cluster'] ??
false;
59 $this->cache
= MediaWikiServices
::getInstance()->getMainWANObjectCache();
62 protected function supportedOrders() {
63 return [ 'random', 'timestamp', 'fifo' ];
66 protected function optimalOrder() {
71 * @see JobQueue::doIsEmpty()
74 protected function doIsEmpty() {
75 $dbr = $this->getReplicaDB();
77 $found = $dbr->selectField( // unclaimed job
78 'job', '1', [ 'job_cmd' => $this->type
, 'job_token' => '' ], __METHOD__
80 } catch ( DBError
$e ) {
81 $this->throwDBException( $e );
88 * @see JobQueue::doGetSize()
91 protected function doGetSize() {
92 $key = $this->getCacheKey( 'size' );
94 $size = $this->cache
->get( $key );
95 if ( is_int( $size ) ) {
100 $dbr = $this->getReplicaDB();
101 $size = (int)$dbr->selectField( 'job', 'COUNT(*)',
102 [ 'job_cmd' => $this->type
, 'job_token' => '' ],
105 } catch ( DBError
$e ) {
106 $this->throwDBException( $e );
108 $this->cache
->set( $key, $size, self
::CACHE_TTL_SHORT
);
114 * @see JobQueue::doGetAcquiredCount()
117 protected function doGetAcquiredCount() {
118 if ( $this->claimTTL
<= 0 ) {
119 return 0; // no acknowledgements
122 $key = $this->getCacheKey( 'acquiredcount' );
124 $count = $this->cache
->get( $key );
125 if ( is_int( $count ) ) {
129 $dbr = $this->getReplicaDB();
131 $count = (int)$dbr->selectField( 'job', 'COUNT(*)',
132 [ 'job_cmd' => $this->type
, "job_token != {$dbr->addQuotes( '' )}" ],
135 } catch ( DBError
$e ) {
136 $this->throwDBException( $e );
138 $this->cache
->set( $key, $count, self
::CACHE_TTL_SHORT
);
144 * @see JobQueue::doGetAbandonedCount()
146 * @throws MWException
148 protected function doGetAbandonedCount() {
149 if ( $this->claimTTL
<= 0 ) {
150 return 0; // no acknowledgements
153 $key = $this->getCacheKey( 'abandonedcount' );
155 $count = $this->cache
->get( $key );
156 if ( is_int( $count ) ) {
160 $dbr = $this->getReplicaDB();
162 $count = (int)$dbr->selectField( 'job', 'COUNT(*)',
164 'job_cmd' => $this->type
,
165 "job_token != {$dbr->addQuotes( '' )}",
166 "job_attempts >= " . $dbr->addQuotes( $this->maxTries
)
170 } catch ( DBError
$e ) {
171 $this->throwDBException( $e );
174 $this->cache
->set( $key, $count, self
::CACHE_TTL_SHORT
);
180 * @see JobQueue::doBatchPush()
181 * @param IJobSpecification[] $jobs
183 * @throws DBError|Exception
186 protected function doBatchPush( array $jobs, $flags ) {
187 $dbw = $this->getMasterDB();
188 // In general, there will be two cases here:
189 // a) sqlite; DB connection is probably a regular round-aware handle.
190 // If the connection is busy with a transaction, then defer the job writes
191 // until right before the main round commit step. Any errors that bubble
192 // up will rollback the main commit round.
193 // b) mysql/postgres; DB connection is generally a separate CONN_TRX_AUTOCOMMIT handle.
194 // No transaction is active nor will be started by writes, so enqueue the jobs
195 // now so that any errors will show up immediately as the interface expects. Any
196 // errors that bubble up will rollback the main commit round.
198 $dbw->onTransactionPreCommitOrIdle(
199 function ( IDatabase
$dbw ) use ( $jobs, $flags, $fname ) {
200 $this->doBatchPushInternal( $dbw, $jobs, $flags, $fname );
207 * This function should *not* be called outside of JobQueueDB
209 * @suppress SecurityCheck-SQLInjection Bug in phan-taint-check handling bulk inserts
210 * @param IDatabase $dbw
211 * @param IJobSpecification[] $jobs
213 * @param string $method
217 public function doBatchPushInternal( IDatabase
$dbw, array $jobs, $flags, $method ) {
218 if ( $jobs === [] ) {
222 $rowSet = []; // (sha1 => job) map for jobs that are de-duplicated
223 $rowList = []; // list of jobs for jobs that are not de-duplicated
224 foreach ( $jobs as $job ) {
225 $row = $this->insertFields( $job, $dbw );
226 if ( $job->ignoreDuplicates() ) {
227 $rowSet[$row['job_sha1']] = $row;
233 if ( $flags & self
::QOS_ATOMIC
) {
234 $dbw->startAtomic( $method ); // wrap all the job additions in one transaction
237 // Strip out any duplicate jobs that are already in the queue...
238 if ( count( $rowSet ) ) {
239 $res = $dbw->select( 'job', 'job_sha1',
241 // No job_type condition since it's part of the job_sha1 hash
242 'job_sha1' => array_keys( $rowSet ),
243 'job_token' => '' // unclaimed
247 foreach ( $res as $row ) {
248 wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate.\n" );
249 unset( $rowSet[$row->job_sha1
] ); // already enqueued
252 // Build the full list of job rows to insert
253 $rows = array_merge( $rowList, array_values( $rowSet ) );
254 // Insert the job rows in chunks to avoid replica DB lag...
255 foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
256 $dbw->insert( 'job', $rowBatch, $method );
258 JobQueue
::incrStats( 'inserts', $this->type
, count( $rows ) );
259 JobQueue
::incrStats( 'dupe_inserts', $this->type
,
260 count( $rowSet ) +
count( $rowList ) - count( $rows )
262 } catch ( DBError
$e ) {
263 $this->throwDBException( $e );
265 if ( $flags & self
::QOS_ATOMIC
) {
266 $dbw->endAtomic( $method );
271 * @see JobQueue::doPop()
274 protected function doPop() {
275 $dbw = $this->getMasterDB();
277 $autoTrx = $dbw->getFlag( DBO_TRX
); // get current setting
278 $dbw->clearFlag( DBO_TRX
); // make each query its own transaction
279 $scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
280 $dbw->setFlag( $autoTrx ? DBO_TRX
: 0 ); // restore old setting
283 $uuid = wfRandomString( 32 ); // pop attempt
284 $job = false; // job popped off
285 do { // retry when our row is invalid or deleted as a duplicate
286 // Try to reserve a row in the DB...
287 if ( in_array( $this->order
, [ 'fifo', 'timestamp' ] ) ) {
288 $row = $this->claimOldest( $uuid );
289 } else { // random first
290 $rand = mt_rand( 0, self
::MAX_JOB_RANDOM
); // encourage concurrent UPDATEs
291 $gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
292 $row = $this->claimRandom( $uuid, $rand, $gte );
294 // Check if we found a row to reserve...
296 break; // nothing to do
298 JobQueue
::incrStats( 'pops', $this->type
);
299 // Get the job object from the row...
300 $title = Title
::makeTitle( $row->job_namespace
, $row->job_title
);
301 $job = Job
::factory( $row->job_cmd
, $title,
302 self
::extractBlob( $row->job_params
) );
303 $job->metadata
['id'] = $row->job_id
;
304 $job->metadata
['timestamp'] = $row->job_timestamp
;
308 if ( !$job ||
mt_rand( 0, 9 ) == 0 ) {
309 // Handled jobs that need to be recycled/deleted;
310 // any recycled jobs will be picked up next attempt
311 $this->recycleAndDeleteStaleJobs();
313 } catch ( DBError
$e ) {
314 $this->throwDBException( $e );
321 * Reserve a row with a single UPDATE without holding row locks over RTTs...
323 * @param string $uuid 32 char hex string
324 * @param int $rand Random unsigned integer (31 bits)
325 * @param bool $gte Search for job_random >= $random (otherwise job_random <= $random)
326 * @return stdClass|bool Row|false
328 protected function claimRandom( $uuid, $rand, $gte ) {
329 $dbw = $this->getMasterDB();
330 // Check cache to see if the queue has <= OFFSET items
331 $tinyQueue = $this->cache
->get( $this->getCacheKey( 'small' ) );
333 $row = false; // the row acquired
334 $invertedDirection = false; // whether one job_random direction was already scanned
335 // This uses a replication safe method for acquiring jobs. One could use UPDATE+LIMIT
336 // instead, but that either uses ORDER BY (in which case it deadlocks in MySQL) or is
337 // not replication safe. Due to https://bugs.mysql.com/bug.php?id=6980, subqueries cannot
338 // be used here with MySQL.
340 if ( $tinyQueue ) { // queue has <= MAX_OFFSET rows
341 // For small queues, using OFFSET will overshoot and return no rows more often.
342 // Instead, this uses job_random to pick a row (possibly checking both directions).
343 $ineq = $gte ?
'>=' : '<=';
344 $dir = $gte ?
'ASC' : 'DESC';
345 $row = $dbw->selectRow( 'job', self
::selectFields(), // find a random job
347 'job_cmd' => $this->type
,
348 'job_token' => '', // unclaimed
349 "job_random {$ineq} {$dbw->addQuotes( $rand )}" ],
351 [ 'ORDER BY' => "job_random {$dir}" ]
353 if ( !$row && !$invertedDirection ) {
355 $invertedDirection = true;
356 continue; // try the other direction
358 } else { // table *may* have >= MAX_OFFSET rows
359 // T44614: "ORDER BY job_random" with a job_random inequality causes high CPU
360 // in MySQL if there are many rows for some reason. This uses a small OFFSET
361 // instead of job_random for reducing excess claim retries.
362 $row = $dbw->selectRow( 'job', self
::selectFields(), // find a random job
364 'job_cmd' => $this->type
,
365 'job_token' => '', // unclaimed
368 [ 'OFFSET' => mt_rand( 0, self
::MAX_OFFSET
) ]
371 $tinyQueue = true; // we know the queue must have <= MAX_OFFSET rows
372 $this->cache
->set( $this->getCacheKey( 'small' ), 1, 30 );
373 continue; // use job_random
377 if ( $row ) { // claim the job
378 $dbw->update( 'job', // update by PK
380 'job_token' => $uuid,
381 'job_token_timestamp' => $dbw->timestamp(),
382 'job_attempts = job_attempts+1' ],
383 [ 'job_cmd' => $this->type
, 'job_id' => $row->job_id
, 'job_token' => '' ],
386 // This might get raced out by another runner when claiming the previously
387 // selected row. The use of job_random should minimize this problem, however.
388 if ( !$dbw->affectedRows() ) {
389 $row = false; // raced out
392 break; // nothing to do
400 * Reserve a row with a single UPDATE without holding row locks over RTTs...
402 * @param string $uuid 32 char hex string
403 * @return stdClass|bool Row|false
405 protected function claimOldest( $uuid ) {
406 $dbw = $this->getMasterDB();
408 $row = false; // the row acquired
410 if ( $dbw->getType() === 'mysql' ) {
411 // Per https://bugs.mysql.com/bug.php?id=6980, we can't use subqueries on the
412 // same table being changed in an UPDATE query in MySQL (gives Error: 1093).
413 // Oracle and Postgre have no such limitation. However, MySQL offers an
414 // alternative here by supporting ORDER BY + LIMIT for UPDATE queries.
415 $dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
417 "job_token = {$dbw->addQuotes( $uuid ) }, " .
418 "job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
419 "job_attempts = job_attempts+1 " .
421 "job_cmd = {$dbw->addQuotes( $this->type )} " .
422 "AND job_token = {$dbw->addQuotes( '' )} " .
423 ") ORDER BY job_id ASC LIMIT 1",
427 // Use a subquery to find the job, within an UPDATE to claim it.
428 // This uses as much of the DB wrapper functions as possible.
431 'job_token' => $uuid,
432 'job_token_timestamp' => $dbw->timestamp(),
433 'job_attempts = job_attempts+1' ],
435 $dbw->selectSQLText( 'job', 'job_id',
436 [ 'job_cmd' => $this->type
, 'job_token' => '' ],
438 [ 'ORDER BY' => 'job_id ASC', 'LIMIT' => 1 ] ) .
444 // Fetch any row that we just reserved...
445 if ( $dbw->affectedRows() ) {
446 $row = $dbw->selectRow( 'job', self
::selectFields(),
447 [ 'job_cmd' => $this->type
, 'job_token' => $uuid ], __METHOD__
449 if ( !$row ) { // raced out by duplicate job removal
450 wfDebug( "Row deleted as duplicate by another process.\n" );
453 break; // nothing to do
461 * @see JobQueue::doAck()
463 * @throws MWException
465 protected function doAck( Job
$job ) {
466 if ( !isset( $job->metadata
['id'] ) ) {
467 throw new MWException( "Job of type '{$job->getType()}' has no ID." );
470 $dbw = $this->getMasterDB();
472 $autoTrx = $dbw->getFlag( DBO_TRX
); // get current setting
473 $dbw->clearFlag( DBO_TRX
); // make each query its own transaction
474 $scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
475 $dbw->setFlag( $autoTrx ? DBO_TRX
: 0 ); // restore old setting
478 // Delete a row with a single DELETE without holding row locks over RTTs...
480 [ 'job_cmd' => $this->type
, 'job_id' => $job->metadata
['id'] ], __METHOD__
);
482 JobQueue
::incrStats( 'acks', $this->type
);
483 } catch ( DBError
$e ) {
484 $this->throwDBException( $e );
489 * @see JobQueue::doDeduplicateRootJob()
490 * @param IJobSpecification $job
491 * @throws MWException
494 protected function doDeduplicateRootJob( IJobSpecification
$job ) {
495 $params = $job->getParams();
496 if ( !isset( $params['rootJobSignature'] ) ) {
497 throw new MWException( "Cannot register root job; missing 'rootJobSignature'." );
498 } elseif ( !isset( $params['rootJobTimestamp'] ) ) {
499 throw new MWException( "Cannot register root job; missing 'rootJobTimestamp'." );
501 $key = $this->getRootJobCacheKey( $params['rootJobSignature'] );
502 // Callers should call JobQueueGroup::push() before this method so that if the insert
503 // fails, the de-duplication registration will be aborted. Since the insert is
504 // deferred till "transaction idle", do the same here, so that the ordering is
505 // maintained. Having only the de-duplication registration succeed would cause
506 // jobs to become no-ops without any actual jobs that made them redundant.
507 $dbw = $this->getMasterDB();
508 $cache = $this->dupCache
;
509 $dbw->onTransactionCommitOrIdle(
510 function () use ( $cache, $params, $key ) {
511 $timestamp = $cache->get( $key ); // current last timestamp of this job
512 if ( $timestamp && $timestamp >= $params['rootJobTimestamp'] ) {
513 return true; // a newer version of this root job was enqueued
516 // Update the timestamp of the last root job started at the location...
517 return $cache->set( $key, $params['rootJobTimestamp'], JobQueueDB
::ROOTJOB_TTL
);
526 * @see JobQueue::doDelete()
529 protected function doDelete() {
530 $dbw = $this->getMasterDB();
532 $dbw->delete( 'job', [ 'job_cmd' => $this->type
] );
533 } catch ( DBError
$e ) {
534 $this->throwDBException( $e );
541 * @see JobQueue::doWaitForBackups()
544 protected function doWaitForBackups() {
545 $lbFactory = MediaWikiServices
::getInstance()->getDBLoadBalancerFactory();
546 $lbFactory->waitForReplication(
547 [ 'domain' => $this->domain
, 'cluster' => $this->cluster
] );
553 protected function doFlushCaches() {
554 foreach ( [ 'size', 'acquiredcount' ] as $type ) {
555 $this->cache
->delete( $this->getCacheKey( $type ) );
560 * @see JobQueue::getAllQueuedJobs()
563 public function getAllQueuedJobs() {
564 return $this->getJobIterator( [ 'job_cmd' => $this->getType(), 'job_token' => '' ] );
568 * @see JobQueue::getAllAcquiredJobs()
571 public function getAllAcquiredJobs() {
572 return $this->getJobIterator( [ 'job_cmd' => $this->getType(), "job_token > ''" ] );
576 * @param array $conds Query conditions
579 protected function getJobIterator( array $conds ) {
580 $dbr = $this->getReplicaDB();
582 return new MappedIterator(
583 $dbr->select( 'job', self
::selectFields(), $conds ),
587 Title
::makeTitle( $row->job_namespace
, $row->job_title
),
588 strlen( $row->job_params
) ?
unserialize( $row->job_params
) : []
590 $job->metadata
['id'] = $row->job_id
;
591 $job->metadata
['timestamp'] = $row->job_timestamp
;
596 } catch ( DBError
$e ) {
597 $this->throwDBException( $e );
601 public function getCoalesceLocationInternal() {
602 return $this->cluster
603 ?
"DBCluster:{$this->cluster}:{$this->domain}"
604 : "LBFactory:{$this->domain}";
607 protected function doGetSiblingQueuesWithJobs( array $types ) {
608 $dbr = $this->getReplicaDB();
609 // @note: this does not check whether the jobs are claimed or not.
610 // This is useful so JobQueueGroup::pop() also sees queues that only
611 // have stale jobs. This lets recycleAndDeleteStaleJobs() re-enqueue
612 // failed jobs so that they can be popped again for that edge case.
613 $res = $dbr->select( 'job', 'DISTINCT job_cmd',
614 [ 'job_cmd' => $types ], __METHOD__
);
617 foreach ( $res as $row ) {
618 $types[] = $row->job_cmd
;
624 protected function doGetSiblingQueueSizes( array $types ) {
625 $dbr = $this->getReplicaDB();
626 $res = $dbr->select( 'job', [ 'job_cmd', 'COUNT(*) AS count' ],
627 [ 'job_cmd' => $types ], __METHOD__
, [ 'GROUP BY' => 'job_cmd' ] );
630 foreach ( $res as $row ) {
631 $sizes[$row->job_cmd
] = (int)$row->count
;
638 * Recycle or destroy any jobs that have been claimed for too long
640 * @return int Number of jobs recycled/deleted
642 public function recycleAndDeleteStaleJobs() {
644 $count = 0; // affected rows
645 $dbw = $this->getMasterDB();
648 if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__
, 1 ) ) {
649 return $count; // already in progress
652 // Remove claims on jobs acquired for too long if enabled...
653 if ( $this->claimTTL
> 0 ) {
654 $claimCutoff = $dbw->timestamp( $now - $this->claimTTL
);
655 // Get the IDs of jobs that have be claimed but not finished after too long.
656 // These jobs can be recycled into the queue by expiring the claim. Selecting
657 // the IDs first means that the UPDATE can be done by primary key (less deadlocks).
658 $res = $dbw->select( 'job', 'job_id',
660 'job_cmd' => $this->type
,
661 "job_token != {$dbw->addQuotes( '' )}", // was acquired
662 "job_token_timestamp < {$dbw->addQuotes( $claimCutoff )}", // stale
663 "job_attempts < {$dbw->addQuotes( $this->maxTries )}" ], // retries left
669 }, iterator_to_array( $res )
671 if ( count( $ids ) ) {
672 // Reset job_token for these jobs so that other runners will pick them up.
673 // Set the timestamp to the current time, as it is useful to now that the job
674 // was already tried before (the timestamp becomes the "released" time).
678 'job_token_timestamp' => $dbw->timestamp( $now ) ], // time of release
683 $affected = $dbw->affectedRows();
685 JobQueue
::incrStats( 'recycles', $this->type
, $affected );
686 $this->aggr
->notifyQueueNonEmpty( $this->domain
, $this->type
);
690 // Just destroy any stale jobs...
691 $pruneCutoff = $dbw->timestamp( $now - self
::MAX_AGE_PRUNE
);
693 'job_cmd' => $this->type
,
694 "job_token != {$dbw->addQuotes( '' )}", // was acquired
695 "job_token_timestamp < {$dbw->addQuotes( $pruneCutoff )}" // stale
697 if ( $this->claimTTL
> 0 ) { // only prune jobs attempted too many times...
698 $conds[] = "job_attempts >= {$dbw->addQuotes( $this->maxTries )}";
700 // Get the IDs of jobs that are considered stale and should be removed. Selecting
701 // the IDs first means that the UPDATE can be done by primary key (less deadlocks).
702 $res = $dbw->select( 'job', 'job_id', $conds, __METHOD__
);
706 }, iterator_to_array( $res )
708 if ( count( $ids ) ) {
709 $dbw->delete( 'job', [ 'job_id' => $ids ], __METHOD__
);
710 $affected = $dbw->affectedRows();
712 JobQueue
::incrStats( 'abandons', $this->type
, $affected );
715 $dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__
);
716 } catch ( DBError
$e ) {
717 $this->throwDBException( $e );
724 * @param IJobSpecification $job
725 * @param IDatabase $db
728 protected function insertFields( IJobSpecification
$job, IDatabase
$db ) {
730 // Fields that describe the nature of the job
731 'job_cmd' => $job->getType(),
732 'job_namespace' => $job->getTitle()->getNamespace(),
733 'job_title' => $job->getTitle()->getDBkey(),
734 'job_params' => self
::makeBlob( $job->getParams() ),
735 // Additional job metadata
736 'job_timestamp' => $db->timestamp(),
737 'job_sha1' => Wikimedia\base_convert
(
738 sha1( serialize( $job->getDeduplicationInfo() ) ),
741 'job_random' => mt_rand( 0, self
::MAX_JOB_RANDOM
)
746 * @throws JobQueueConnectionError
749 protected function getReplicaDB() {
751 return $this->getDB( DB_REPLICA
);
752 } catch ( DBConnectionError
$e ) {
753 throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
758 * @throws JobQueueConnectionError
761 protected function getMasterDB() {
763 return $this->getDB( DB_MASTER
);
764 } catch ( DBConnectionError
$e ) {
765 throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
770 * @param int $index (DB_REPLICA/DB_MASTER)
773 protected function getDB( $index ) {
774 $lbFactory = MediaWikiServices
::getInstance()->getDBLoadBalancerFactory();
775 $lb = ( $this->cluster
!== false )
776 ?
$lbFactory->getExternalLB( $this->cluster
)
777 : $lbFactory->getMainLB( $this->domain
);
779 return ( $lb->getServerType( $lb->getWriterIndex() ) !== 'sqlite' )
780 // Keep a separate connection to avoid contention and deadlocks;
781 // However, SQLite has the opposite behavior due to DB-level locking.
782 ?
$lb->getConnectionRef( $index, [], $this->domain
, $lb::CONN_TRX_AUTOCOMMIT
)
783 // Jobs insertion will be defered until the PRESEND stage to reduce contention.
784 : $lb->getConnectionRef( $index, [], $this->domain
);
788 * @param string $property
791 private function getCacheKey( $property ) {
792 $cluster = is_string( $this->cluster
) ?
$this->cluster
: 'main';
794 return $this->cache
->makeGlobalKey(
804 * @param array|bool $params
807 protected static function makeBlob( $params ) {
808 if ( $params !== false ) {
809 return serialize( $params );
816 * @param string $blob
819 protected static function extractBlob( $blob ) {
820 if ( (string)$blob !== '' ) {
821 return unserialize( $blob );
829 * @throws JobQueueError
831 protected function throwDBException( DBError
$e ) {
832 throw new JobQueueError( get_class( $e ) . ": " . $e->getMessage() );
836 * Return the list of job fields that should be selected.
840 public static function selectFields() {
851 'job_token_timestamp',