<?php
/**
 * Database-backed job queue code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 */
use Wikimedia\Rdbms\IDatabase;
use Wikimedia\Rdbms\Database;
use Wikimedia\Rdbms\DBConnectionError;
use Wikimedia\Rdbms\DBError;
use MediaWiki\MediaWikiServices;
use Wikimedia\ScopedCallback;

/**
 * Class to handle job queues stored in the DB
 *
 * @ingroup JobQueue
 * @since 1.21
 */
class JobQueueDB extends JobQueue {
	const CACHE_TTL_SHORT = 30; // integer; seconds to cache info without re-validating
	const MAX_AGE_PRUNE = 604800; // integer; seconds a job can live once claimed
	const MAX_JOB_RANDOM = 2147483647; // integer; 2^31 - 1, used for job_random
	const MAX_OFFSET = 255; // integer; maximum number of rows to skip

	/** @var WANObjectCache */
	protected $cache;
	/** @var IDatabase|DBError|null */
	protected $conn;

	/** @var array|null Server configuration array */
	protected $server;
	/** @var string|null Name of an external DB cluster or null for the local DB cluster */
	protected $cluster;

	/**
	 * Additional parameters include:
	 * - server : Server configuration array for Database::factory. Overrides "cluster".
	 * - cluster : The name of an external cluster registered via LBFactory.
	 *   If not specified, the primary DB cluster for the wiki will be used.
	 *   This can be overridden with a custom cluster so that DB handles will
	 *   be retrieved via LBFactory::getExternalLB() and getConnection().
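	 *
	 * An illustrative $wgJobTypeConf entry wiring a job type to this class (a sketch
	 * only; the cluster name "extension1" is hypothetical and the non-class keys are
	 * optional JobQueue parameters):
	 * @code
	 * $wgJobTypeConf['default'] = [
	 *     'class' => JobQueueDB::class,
	 *     'order' => 'random',
	 *     'claimTTL' => 3600,
	 *     'cluster' => 'extension1'
	 * ];
	 * @endcode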
	 * @param array $params
	 */
	protected function __construct( array $params ) {
		parent::__construct( $params );

		if ( isset( $params['server'] ) ) {
			$this->server = $params['server'];
		} elseif ( isset( $params['cluster'] ) && is_string( $params['cluster'] ) ) {
			$this->cluster = $params['cluster'];
		}

		$this->cache = MediaWikiServices::getInstance()->getMainWANObjectCache();
	}

	protected function supportedOrders() {
		return [ 'random', 'timestamp', 'fifo' ];
	}

	protected function optimalOrder() {
		return 'random';
	}

	/**
	 * @see JobQueue::doIsEmpty()
	 * @return bool
	 */
	protected function doIsEmpty() {
		$dbr = $this->getReplicaDB();
		try {
			$found = $dbr->selectField( // unclaimed job
				'job', '1', [ 'job_cmd' => $this->type, 'job_token' => '' ], __METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return !$found;
	}

	/**
	 * @see JobQueue::doGetSize()
	 * @return int
	 */
	protected function doGetSize() {
		$key = $this->getCacheKey( 'size' );

		$size = $this->cache->get( $key );
		if ( is_int( $size ) ) {
			return $size;
		}

		try {
			$dbr = $this->getReplicaDB();
			$size = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, 'job_token' => '' ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
		$this->cache->set( $key, $size, self::CACHE_TTL_SHORT );

		return $size;
	}

	/**
	 * @see JobQueue::doGetAcquiredCount()
	 * @return int
	 */
	protected function doGetAcquiredCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'acquiredcount' );

		$count = $this->cache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, "job_token != {$dbr->addQuotes( '' )}" ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
		$this->cache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

	/**
	 * @see JobQueue::doGetAbandonedCount()
	 * @return int
	 * @throws MWException
	 */
	protected function doGetAbandonedCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'abandonedcount' );

		$count = $this->cache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[
					'job_cmd' => $this->type,
					"job_token != {$dbr->addQuotes( '' )}",
					"job_attempts >= " . $dbr->addQuotes( $this->maxTries )
				],
				__METHOD__
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		$this->cache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

	/**
	 * @see JobQueue::doBatchPush()
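	 *
	 * Callers normally enqueue via JobQueueGroup rather than calling this directly.
	 * A minimal caller-side sketch (assumes the built-in "null" job type):
	 * @code
	 * JobQueueGroup::singleton()->push( new JobSpecification( 'null', [] ) );
	 * @endcode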
	 * @param IJobSpecification[] $jobs
	 * @param int $flags
	 * @throws DBError|Exception
	 * @return void
	 */
	protected function doBatchPush( array $jobs, $flags ) {
		$dbw = $this->getMasterDB();
		// In general, there will be two cases here:
		// a) sqlite; DB connection is probably a regular round-aware handle.
		//    If the connection is busy with a transaction, then defer the job writes
		//    until right before the main round commit step. Any errors that bubble
		//    up will rollback the main commit round.
		// b) mysql/postgres; DB connection is generally a separate CONN_TRX_AUTOCOMMIT handle.
		//    No transaction is active nor will be started by writes, so enqueue the jobs
		//    now so that any errors will show up immediately as the interface expects.
		$fname = __METHOD__;
		$dbw->onTransactionPreCommitOrIdle(
			function ( IDatabase $dbw ) use ( $jobs, $flags, $fname ) {
				$this->doBatchPushInternal( $dbw, $jobs, $flags, $fname );
			},
			$fname
		);
	}

	/**
	 * This function should *not* be called outside of JobQueueDB
	 *
	 * @suppress SecurityCheck-SQLInjection Bug in phan-taint-check handling bulk inserts
	 * @param IDatabase $dbw
	 * @param IJobSpecification[] $jobs
	 * @param int $flags
	 * @param string $method
	 * @throws DBError
	 * @return void
	 */
	public function doBatchPushInternal( IDatabase $dbw, array $jobs, $flags, $method ) {
		if ( $jobs === [] ) {
			return;
		}

		$rowSet = []; // (sha1 => job) map for jobs that are de-duplicated
		$rowList = []; // list of jobs for jobs that are not de-duplicated
		foreach ( $jobs as $job ) {
			$row = $this->insertFields( $job, $dbw );
			if ( $job->ignoreDuplicates() ) {
				$rowSet[$row['job_sha1']] = $row;
			} else {
				$rowList[] = $row;
			}
		}

		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->startAtomic( $method ); // wrap all the job additions in one transaction
		}
		try {
			// Strip out any duplicate jobs that are already in the queue...
			if ( count( $rowSet ) ) {
				$res = $dbw->select( 'job', 'job_sha1',
					[
						// No job_type condition since it's part of the job_sha1 hash
						'job_sha1' => array_keys( $rowSet ),
						'job_token' => '' // unclaimed
					],
					$method
				);
				foreach ( $res as $row ) {
					wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate.\n" );
					unset( $rowSet[$row->job_sha1] ); // already enqueued
				}
			}
			// Build the full list of job rows to insert
			$rows = array_merge( $rowList, array_values( $rowSet ) );
			// Insert the job rows in chunks to avoid replica DB lag...
			foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
				$dbw->insert( 'job', $rowBatch, $method );
			}
			JobQueue::incrStats( 'inserts', $this->type, count( $rows ) );
			JobQueue::incrStats( 'dupe_inserts', $this->type,
				count( $rowSet ) + count( $rowList ) - count( $rows )
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->endAtomic( $method );
		}
	}

	/**
	 * @see JobQueue::doPop()
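	 *
	 * A sketch of the usual pop/run/ack cycle as seen from a job runner (illustrative
	 * only; assumes the built-in "null" job type):
	 * @code
	 * $queue = JobQueueGroup::singleton()->get( 'null' );
	 * while ( ( $job = $queue->pop() ) !== false ) {
	 *     $job->run();
	 *     $queue->ack( $job );
	 * }
	 * @endcode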
	 * @return Job|bool
	 */
	protected function doPop() {
		$dbw = $this->getMasterDB();
		try {
			$autoTrx = $dbw->getFlag( DBO_TRX ); // get current setting
			$dbw->clearFlag( DBO_TRX ); // make each query its own transaction
			$scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
				$dbw->setFlag( $autoTrx ? DBO_TRX : 0 ); // restore old setting
			} );

			$uuid = wfRandomString( 32 ); // pop attempt
			$job = false; // job popped off
			do { // retry when our row is invalid or deleted as a duplicate
				// Try to reserve a row in the DB...
				if ( in_array( $this->order, [ 'fifo', 'timestamp' ] ) ) {
					$row = $this->claimOldest( $uuid );
				} else { // random first
					$rand = mt_rand( 0, self::MAX_JOB_RANDOM ); // encourage concurrent UPDATEs
					$gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
					$row = $this->claimRandom( $uuid, $rand, $gte );
				}
				// Check if we found a row to reserve...
				if ( !$row ) {
					break; // nothing to do
				}
				JobQueue::incrStats( 'pops', $this->type );
				// Get the job object from the row...
				$title = Title::makeTitle( $row->job_namespace, $row->job_title );
				$job = Job::factory( $row->job_cmd, $title,
					self::extractBlob( $row->job_params ) );
				$job->metadata['id'] = $row->job_id;
				$job->metadata['timestamp'] = $row->job_timestamp;
				break; // done
			} while ( true );

			if ( !$job || mt_rand( 0, 9 ) == 0 ) {
				// Handle jobs that need to be recycled/deleted;
				// any recycled jobs will be picked up next attempt
				$this->recycleAndDeleteStaleJobs();
			}
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return $job;
	}

	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @param int $rand Random unsigned integer (31 bits)
	 * @param bool $gte Search for job_random >= $random (otherwise job_random <= $random)
	 * @return stdClass|bool Row|false
	 */
	protected function claimRandom( $uuid, $rand, $gte ) {
		$dbw = $this->getMasterDB();
		// Check cache to see if the queue has <= OFFSET items
		$tinyQueue = $this->cache->get( $this->getCacheKey( 'small' ) );

		$row = false; // the row acquired
		$invertedDirection = false; // whether one job_random direction was already scanned
		// This uses a replication safe method for acquiring jobs. One could use UPDATE+LIMIT
		// instead, but that either uses ORDER BY (in which case it deadlocks in MySQL) or is
		// not replication safe. Due to https://bugs.mysql.com/bug.php?id=6980, subqueries cannot
		// be used here with MySQL.
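		// Illustratively (a sketch, not the exact SQL the builder emits), the two branches
		// below boil down to queries of the form:
		//   small queue:  SELECT ... FROM job WHERE job_cmd = 'X' AND job_token = ''
		//                 AND job_random >= N ORDER BY job_random ASC LIMIT 1
		//   large queue:  SELECT ... FROM job WHERE job_cmd = 'X' AND job_token = ''
		//                 LIMIT 1 OFFSET <random 0..MAX_OFFSET>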
		do {
			if ( $tinyQueue ) { // queue has <= MAX_OFFSET rows
				// For small queues, using OFFSET will overshoot and return no rows more often.
				// Instead, this uses job_random to pick a row (possibly checking both directions).
				$ineq = $gte ? '>=' : '<=';
				$dir = $gte ? 'ASC' : 'DESC';
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
						"job_random {$ineq} {$dbw->addQuotes( $rand )}" ],
					__METHOD__,
					[ 'ORDER BY' => "job_random {$dir}" ]
				);
				if ( !$row && !$invertedDirection ) {
					$gte = !$gte;
					$invertedDirection = true;
					continue; // try the other direction
				}
			} else { // table *may* have >= MAX_OFFSET rows
				// T44614: "ORDER BY job_random" with a job_random inequality causes high CPU
				// in MySQL if there are many rows for some reason. This uses a small OFFSET
				// instead of job_random for reducing excess claim retries.
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
					],
					__METHOD__,
					[ 'OFFSET' => mt_rand( 0, self::MAX_OFFSET ) ]
				);
				if ( !$row ) {
					$tinyQueue = true; // we know the queue must have <= MAX_OFFSET rows
					$this->cache->set( $this->getCacheKey( 'small' ), 1, 30 );
					continue; // use job_random
				}
			}

			if ( $row ) { // claim the job
				$dbw->update( 'job', // update by PK
					[
						'job_token' => $uuid,
						'job_token_timestamp' => $dbw->timestamp(),
						'job_attempts = job_attempts+1' ],
					[ 'job_cmd' => $this->type, 'job_id' => $row->job_id, 'job_token' => '' ],
					__METHOD__
				);
				// This might get raced out by another runner when claiming the previously
				// selected row. The use of job_random should minimize this problem, however.
				if ( !$dbw->affectedRows() ) {
					$row = false; // raced out
				}
			} else {
				break; // nothing to do
			}
		} while ( !$row );

		return $row;
	}

	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @return stdClass|bool Row|false
	 */
	protected function claimOldest( $uuid ) {
		$dbw = $this->getMasterDB();

		$row = false; // the row acquired
		do {
			if ( $dbw->getType() === 'mysql' ) {
				// Per https://bugs.mysql.com/bug.php?id=6980, we can't use subqueries on the
				// same table being changed in an UPDATE query in MySQL (gives Error: 1093).
				// Oracle and Postgres have no such limitation. However, MySQL offers an
				// alternative here by supporting ORDER BY + LIMIT for UPDATE queries.
				$dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
					"SET " .
						"job_token = {$dbw->addQuotes( $uuid ) }, " .
						"job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
						"job_attempts = job_attempts+1 " .
					"WHERE ( " .
						"job_cmd = {$dbw->addQuotes( $this->type )} " .
						"AND job_token = {$dbw->addQuotes( '' )} " .
					") ORDER BY job_id ASC LIMIT 1",
					__METHOD__
				);
			} else {
				// Use a subquery to find the job, within an UPDATE to claim it.
				// This uses as much of the DB wrapper functions as possible.
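				// Roughly, the equivalent SQL is (a sketch, not the literal statement built):
				//   UPDATE job SET job_token = ..., job_token_timestamp = ..., job_attempts = job_attempts+1
				//   WHERE job_id = (SELECT job_id FROM job
				//                   WHERE job_cmd = 'X' AND job_token = '' ORDER BY job_id ASC LIMIT 1)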
				$dbw->update( 'job',
					[
						'job_token' => $uuid,
						'job_token_timestamp' => $dbw->timestamp(),
						'job_attempts = job_attempts+1' ],
					[ 'job_id = (' .
						$dbw->selectSQLText( 'job', 'job_id',
							[ 'job_cmd' => $this->type, 'job_token' => '' ],
							__METHOD__,
							[ 'ORDER BY' => 'job_id ASC', 'LIMIT' => 1 ] ) .
						')'
					],
					__METHOD__
				);
			}
			// Fetch any row that we just reserved...
			if ( $dbw->affectedRows() ) {
				$row = $dbw->selectRow( 'job', self::selectFields(),
					[ 'job_cmd' => $this->type, 'job_token' => $uuid ], __METHOD__
				);
				if ( !$row ) { // raced out by duplicate job removal
					wfDebug( "Row deleted as duplicate by another process.\n" );
				}
			} else {
				break; // nothing to do
			}
		} while ( !$row );

		return $row;
	}

	/**
	 * @see JobQueue::doAck()
	 * @param Job $job
	 * @throws MWException
	 */
	protected function doAck( Job $job ) {
		if ( !isset( $job->metadata['id'] ) ) {
			throw new MWException( "Job of type '{$job->getType()}' has no ID." );
		}

		$dbw = $this->getMasterDB();
		try {
			$autoTrx = $dbw->getFlag( DBO_TRX ); // get current setting
			$dbw->clearFlag( DBO_TRX ); // make each query its own transaction
			$scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
				$dbw->setFlag( $autoTrx ? DBO_TRX : 0 ); // restore old setting
			} );

			// Delete a row with a single DELETE without holding row locks over RTTs...
			$dbw->delete( 'job',
				[ 'job_cmd' => $this->type, 'job_id' => $job->metadata['id'] ], __METHOD__ );

			JobQueue::incrStats( 'acks', $this->type );
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
	}

	/**
	 * @see JobQueue::doDeduplicateRootJob()
	 * @param IJobSpecification $job
	 * @throws MWException
	 * @return bool
	 */
	protected function doDeduplicateRootJob( IJobSpecification $job ) {
		$params = $job->getParams();
		if ( !isset( $params['rootJobSignature'] ) ) {
			throw new MWException( "Cannot register root job; missing 'rootJobSignature'." );
		} elseif ( !isset( $params['rootJobTimestamp'] ) ) {
			throw new MWException( "Cannot register root job; missing 'rootJobTimestamp'." );
		}
		$key = $this->getRootJobCacheKey( $params['rootJobSignature'] );
		// Callers should call JobQueueGroup::push() before this method so that if the insert
		// fails, the de-duplication registration will be aborted. Since the insert is
		// deferred till "transaction idle", do the same here, so that the ordering is
		// maintained. Having only the de-duplication registration succeed would cause
		// jobs to become no-ops without any actual jobs that made them redundant.
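		// For reference, the parameters produced by Job::newRootJobParams() look roughly like
		// (illustrative values only):
		//   [ 'rootJobSignature' => sha1( <dedup key> ), 'rootJobTimestamp' => <MW timestamp> ]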
		$dbw = $this->getMasterDB();
		$cache = $this->dupCache;
		$dbw->onTransactionCommitOrIdle(
			function () use ( $cache, $params, $key ) {
				$timestamp = $cache->get( $key ); // current last timestamp of this job
				if ( $timestamp && $timestamp >= $params['rootJobTimestamp'] ) {
					return true; // a newer version of this root job was enqueued
				}

				// Update the timestamp of the last root job started at the location...
				return $cache->set( $key, $params['rootJobTimestamp'], JobQueueDB::ROOTJOB_TTL );
			},
			__METHOD__
		);

		return true;
	}

	/**
	 * @see JobQueue::doDelete()
	 * @return bool
	 */
	protected function doDelete() {
		$dbw = $this->getMasterDB();
		try {
			$dbw->delete( 'job', [ 'job_cmd' => $this->type ], __METHOD__ );
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return true;
	}

	/**
	 * @see JobQueue::doWaitForBackups()
	 * @return void
	 */
	protected function doWaitForBackups() {
		if ( $this->server ) {
			return; // not using LBFactory instance
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$lbFactory->waitForReplication( [
			'domain' => $this->domain,
			'cluster' => is_string( $this->cluster ) ? $this->cluster : false
		] );
	}

	/**
	 * @return void
	 */
	protected function doFlushCaches() {
		foreach ( [ 'size', 'acquiredcount' ] as $type ) {
			$this->cache->delete( $this->getCacheKey( $type ) );
		}
	}

	/**
	 * @see JobQueue::getAllQueuedJobs()
	 * @return Iterator
	 */
	public function getAllQueuedJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), 'job_token' => '' ] );
	}

	/**
	 * @see JobQueue::getAllAcquiredJobs()
	 * @return Iterator
	 */
	public function getAllAcquiredJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), "job_token > ''" ] );
	}

	/**
	 * @param array $conds Query conditions
	 * @return Iterator
	 */
	protected function getJobIterator( array $conds ) {
		$dbr = $this->getReplicaDB();
		try {
			return new MappedIterator(
				$dbr->select( 'job', self::selectFields(), $conds, __METHOD__ ),
				function ( $row ) {
					$job = Job::factory(
						$row->job_cmd,
						Title::makeTitle( $row->job_namespace, $row->job_title ),
						strlen( $row->job_params ) ? unserialize( $row->job_params ) : []
					);
					$job->metadata['id'] = $row->job_id;
					$job->metadata['timestamp'] = $row->job_timestamp;

					return $job;
				}
			);
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}
	}

	public function getCoalesceLocationInternal() {
		if ( $this->server ) {
			return null; // not using the LBFactory instance
		}

		return is_string( $this->cluster )
			? "DBCluster:{$this->cluster}:{$this->domain}"
			: "LBFactory:{$this->domain}";
	}

	protected function doGetSiblingQueuesWithJobs( array $types ) {
		$dbr = $this->getReplicaDB();
		// @note: this does not check whether the jobs are claimed or not.
		// This is useful so JobQueueGroup::pop() also sees queues that only
		// have stale jobs. This lets recycleAndDeleteStaleJobs() re-enqueue
		// failed jobs so that they can be popped again for that edge case.
		$res = $dbr->select( 'job', 'DISTINCT job_cmd',
			[ 'job_cmd' => $types ], __METHOD__ );

		$types = [];
		foreach ( $res as $row ) {
			$types[] = $row->job_cmd;
		}

		return $types;
	}

	protected function doGetSiblingQueueSizes( array $types ) {
		$dbr = $this->getReplicaDB();
		$res = $dbr->select( 'job', [ 'job_cmd', 'COUNT(*) AS count' ],
			[ 'job_cmd' => $types ], __METHOD__, [ 'GROUP BY' => 'job_cmd' ] );

		$sizes = [];
		foreach ( $res as $row ) {
			$sizes[$row->job_cmd] = (int)$row->count;
		}

		return $sizes;
	}

	/**
	 * Recycle or destroy any jobs that have been claimed for too long
	 *
	 * @return int Number of jobs recycled/deleted
	 */
	public function recycleAndDeleteStaleJobs() {
		$now = time();
		$count = 0; // affected rows
		$dbw = $this->getMasterDB();

		try {
			if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__, 1 ) ) {
				return $count; // already in progress
			}

			// Remove claims on jobs acquired for too long if enabled...
			if ( $this->claimTTL > 0 ) {
				$claimCutoff = $dbw->timestamp( $now - $this->claimTTL );
				// Get the IDs of jobs that have been claimed but not finished after too long.
				// These jobs can be recycled into the queue by expiring the claim. Selecting
				// the IDs first means that the UPDATE can be done by primary key (less deadlocks).
				$res = $dbw->select( 'job', 'job_id',
					[
						'job_cmd' => $this->type,
						"job_token != {$dbw->addQuotes( '' )}", // was acquired
						"job_token_timestamp < {$dbw->addQuotes( $claimCutoff )}", // stale
						"job_attempts < {$dbw->addQuotes( $this->maxTries )}" ], // retries left
					__METHOD__
				);
				$ids = array_map(
					function ( $o ) {
						return $o->job_id;
					}, iterator_to_array( $res )
				);
				if ( count( $ids ) ) {
					// Reset job_token for these jobs so that other runners will pick them up.
					// Set the timestamp to the current time, as it is useful to know that the job
					// was already tried before (the timestamp becomes the "released" time).
					$dbw->update( 'job',
						[
							'job_token' => '',
							'job_token_timestamp' => $dbw->timestamp( $now ) ], // time of release
						[
							'job_id' => $ids ],
						__METHOD__
					);
					$affected = $dbw->affectedRows();
					$count += $affected;
					JobQueue::incrStats( 'recycles', $this->type, $affected );
					$this->aggr->notifyQueueNonEmpty( $this->domain, $this->type );
				}
			}

			// Just destroy any stale jobs...
			$pruneCutoff = $dbw->timestamp( $now - self::MAX_AGE_PRUNE );
			$conds = [
				'job_cmd' => $this->type,
				"job_token != {$dbw->addQuotes( '' )}", // was acquired
				"job_token_timestamp < {$dbw->addQuotes( $pruneCutoff )}" // stale
			];
			if ( $this->claimTTL > 0 ) { // only prune jobs attempted too many times...
				$conds[] = "job_attempts >= {$dbw->addQuotes( $this->maxTries )}";
			}
			// Get the IDs of jobs that are considered stale and should be removed. Selecting
			// the IDs first means that the DELETE can be done by primary key (less deadlocks).
			$res = $dbw->select( 'job', 'job_id', $conds, __METHOD__ );
			$ids = array_map(
				function ( $o ) {
					return $o->job_id;
				}, iterator_to_array( $res )
			);
			if ( count( $ids ) ) {
				$dbw->delete( 'job', [ 'job_id' => $ids ], __METHOD__ );
				$affected = $dbw->affectedRows();
				$count += $affected;
				JobQueue::incrStats( 'abandons', $this->type, $affected );
			}

			$dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__ );
		} catch ( DBError $e ) {
			$this->throwDBException( $e );
		}

		return $count;
	}

	/**
	 * @param IJobSpecification $job
	 * @param IDatabase $db
	 * @return array
	 */
	protected function insertFields( IJobSpecification $job, IDatabase $db ) {
		return [
			// Fields that describe the nature of the job
			'job_cmd' => $job->getType(),
			'job_namespace' => $job->getTitle()->getNamespace(),
			'job_title' => $job->getTitle()->getDBkey(),
			'job_params' => self::makeBlob( $job->getParams() ),
			// Additional job metadata
			'job_timestamp' => $db->timestamp(),
			'job_sha1' => Wikimedia\base_convert(
				sha1( serialize( $job->getDeduplicationInfo() ) ),
				16, 36, 31
			),
			'job_random' => mt_rand( 0, self::MAX_JOB_RANDOM )
		];
	}

	/**
	 * @throws JobQueueConnectionError
	 * @return IDatabase
	 */
	protected function getReplicaDB() {
		try {
			return $this->getDB( DB_REPLICA );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

	/**
	 * @throws JobQueueConnectionError
	 * @return IDatabase
	 */
	protected function getMasterDB() {
		try {
			return $this->getDB( DB_MASTER );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

	/**
	 * @param int $index (DB_REPLICA/DB_MASTER)
	 * @return IDatabase
	 */
	protected function getDB( $index ) {
		if ( $this->server ) {
			if ( $this->conn instanceof IDatabase ) {
				return $this->conn;
			} elseif ( $this->conn instanceof DBError ) {
				throw $this->conn;
			}

			try {
				$this->conn = Database::factory( $this->server['type'], $this->server );
			} catch ( DBError $e ) {
				$this->conn = $e;
				throw $e;
			}

			return $this->conn;
		} else {
			$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
			$lb = is_string( $this->cluster )
				? $lbFactory->getExternalLB( $this->cluster )
				: $lbFactory->getMainLB( $this->domain );

			return ( $lb->getServerType( $lb->getWriterIndex() ) !== 'sqlite' )
				// Keep a separate connection to avoid contention and deadlocks;
				// However, SQLite has the opposite behavior due to DB-level locking.
				? $lb->getConnectionRef( $index, [], $this->domain, $lb::CONN_TRX_AUTOCOMMIT )
				// Job insertion will be deferred until the PRESEND stage to reduce contention.
				: $lb->getConnectionRef( $index, [], $this->domain );
		}
	}

	/**
	 * @param string $property
	 * @return string
	 */
	private function getCacheKey( $property ) {
		$cluster = is_string( $this->cluster ) ? $this->cluster : 'main';

		return $this->cache->makeGlobalKey(
			'jobqueue',
			$this->domain,
			$cluster,
			$this->type,
			$property
		);
	}

	/**
	 * @param array|bool $params
	 * @return string
	 */
	protected static function makeBlob( $params ) {
		if ( $params !== false ) {
			return serialize( $params );
		} else {
			return '';
		}
	}

	/**
	 * @param string $blob
	 * @return bool|mixed
	 */
	protected static function extractBlob( $blob ) {
		if ( (string)$blob !== '' ) {
			return unserialize( $blob );
		} else {
			return false;
		}
	}

	/**
	 * @param DBError $e
	 * @throws JobQueueError
	 */
	protected function throwDBException( DBError $e ) {
		throw new JobQueueError( get_class( $e ) . ": " . $e->getMessage() );
	}

	/**
	 * Return the list of job fields that should be selected.
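	 *
	 * @par Example (an illustrative sketch of typical use):
	 * @code
	 * $res = $dbr->select( 'job', JobQueueDB::selectFields(), [ 'job_cmd' => 'null' ], __METHOD__ );
	 * @endcode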
	 * @since 1.23
	 * @return array
	 */
	public static function selectFields() {
		return [
			'job_id',
			'job_cmd',
			'job_namespace',
			'job_title',
			'job_timestamp',
			'job_params',
			'job_random',
			'job_attempts',
			'job_token',
			'job_token_timestamp',
			'job_sha1',
		];
	}
}