<?php
/**
 * Moves blobs indexed by trackBlobs.php to a specified list of destination
 * clusters, and recompresses them in the process.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup Maintenance ExternalStorage
 */
25 use MediaWiki\Logger\LegacyLogger
;
26 use MediaWiki\MediaWikiServices
;
27 use MediaWiki\Shell\Shell
;
28 use Wikimedia\Rdbms\IDatabase
;
// Command-line bootstrap: register options that take an argument, load the
// MediaWiki command-line environment, validate arguments, then run the job.
$optionsWithArgs = RecompressTracked::getOptionsWithArgs();
require __DIR__ . '/../commandLine.inc';

if ( count( $args ) < 1 ) {
	echo "Usage: php recompressTracked.php [options] <cluster> [... <cluster>...]
Moves blobs indexed by trackBlobs.php to a specified list of destination clusters,
and recompresses them in the process. Restartable.

Options:
	--procs <procs>       Set the number of child processes (default 1)
	--copy-only           Copy only, do not update the text table. Restart
	                      without this option to complete.
	--debug-log <file>    Log debugging data to the specified file
	--info-log <file>     Log progress messages to the specified file
	--critical-log <file> Log error messages to the specified file
";
	// Without at least one destination cluster there is nothing to do.
	exit( 1 );
}

$job = RecompressTracked::newFromCommandLine( $args, $options );
$job->execute();
/**
 * Maintenance script that moves blobs indexed by trackBlobs.php to a specified
 * list of destination clusters, and recompresses them in the process.
 *
 * @ingroup Maintenance ExternalStorage
 */
class RecompressTracked {
	/** @var string[] Destination external storage cluster names */
	public $destClusters;
	/** @var int Rows fetched per tracking-table query */
	public $batchSize = 1000;
	/** @var int Max orphan text IDs sent to a child per command */
	public $orphanBatchSize = 1000;
	/** @var int Batches between progress reports */
	public $reportingInterval = 10;
	/** @var int Number of child worker processes */
	public $numProcs = 1;
	/** @var int Batches processed since the last report */
	public $numBatches = 0;
	public $pageBlobClass, $orphanBlobClass;
	public $replicaPipes, $replicaProcs, $prevReplicaId;
	public $copyOnly = false;
	public $isChild = false;
	public $replicaId = false;
	public $noCount = false;
	public $debugLog, $infoLog, $criticalLog;
	/** @var ExternalStoreDB */
	public $store;

	// Command-line options that take an argument.
	private static $optionsWithArgs = [
		'procs',
		'replica-id',
		'debug-log',
		'info-log',
		'critical-log'
	];

	// Map from command-line option name to the corresponding property.
	private static $cmdLineOptionMap = [
		'no-count' => 'noCount',
		'procs' => 'numProcs',
		'copy-only' => 'copyOnly',
		'child' => 'isChild',
		'replica-id' => 'replicaId',
		'debug-log' => 'debugLog',
		'info-log' => 'infoLog',
		'critical-log' => 'criticalLog',
	];

	/**
	 * @return string[] Option names that take an argument
	 */
	static function getOptionsWithArgs() {
		return self::$optionsWithArgs;
	}
98 static function newFromCommandLine( $args, $options ) {
99 $jobOptions = [ 'destClusters' => $args ];
100 foreach ( self
::$cmdLineOptionMap as $cmdOption => $classOption ) {
101 if ( isset( $options[$cmdOption] ) ) {
102 $jobOptions[$classOption] = $options[$cmdOption];
106 return new self( $jobOptions );
109 function __construct( $options ) {
110 foreach ( $options as $name => $value ) {
111 $this->$name = $value;
113 $esFactory = MediaWikiServices
::getInstance()->getExternalStoreFactory();
114 $this->store
= $esFactory->getStore( 'DB' );
115 if ( !$this->isChild
) {
116 $GLOBALS['wgDebugLogPrefix'] = "RCT M: ";
117 } elseif ( $this->replicaId
!== false ) {
118 $GLOBALS['wgDebugLogPrefix'] = "RCT {$this->replicaId}: ";
120 $this->pageBlobClass
= function_exists( 'xdiff_string_bdiff' ) ?
121 DiffHistoryBlob
::class : ConcatenatedGzipHistoryBlob
::class;
122 $this->orphanBlobClass
= ConcatenatedGzipHistoryBlob
::class;
125 function debug( $msg ) {
127 if ( $this->debugLog
) {
128 $this->logToFile( $msg, $this->debugLog
);
132 function info( $msg ) {
134 if ( $this->infoLog
) {
135 $this->logToFile( $msg, $this->infoLog
);
139 function critical( $msg ) {
141 if ( $this->criticalLog
) {
142 $this->logToFile( $msg, $this->criticalLog
);
146 function logToFile( $msg, $file ) {
147 $header = '[' . date( 'd\TH:i:s' ) . '] ' . wfHostname() . ' ' . posix_getpid();
148 if ( $this->replicaId
!== false ) {
149 $header .= "({$this->replicaId})";
151 $header .= ' ' . WikiMap
::getCurrentWikiDbDomain()->getId();
152 LegacyLogger
::emit( sprintf( "%-50s %s\n", $header, $msg ), $file );
156 * Wait until the selected replica DB has caught up to the master.
157 * This allows us to use the replica DB for things that were committed in a
158 * previous part of this batch process.
161 $dbw = wfGetDB( DB_MASTER
);
162 $dbr = wfGetDB( DB_REPLICA
);
163 $pos = $dbw->getMasterPos();
164 $dbr->masterPosWait( $pos, 100000 );
168 * Execute parent or child depending on the isChild option
171 if ( $this->isChild
) {
172 $this->executeChild();
174 $this->executeParent();
179 * Execute the parent process
181 function executeParent() {
182 if ( !$this->checkTrackingTable() ) {
187 $this->startReplicaProcs();
189 $this->doAllOrphans();
190 $this->killReplicaProcs();
194 * Make sure the tracking table exists and isn't empty
197 function checkTrackingTable() {
198 $dbr = wfGetDB( DB_REPLICA
);
199 if ( !$dbr->tableExists( 'blob_tracking' ) ) {
200 $this->critical( "Error: blob_tracking table does not exist" );
204 $row = $dbr->selectRow( 'blob_tracking', '*', '', __METHOD__
);
206 $this->info( "Warning: blob_tracking table contains no rows, skipping this wiki." );
215 * Start the worker processes.
216 * These processes will listen on stdin for commands.
217 * This necessary because text recompression is slow: loading, compressing and
218 * writing are all slow.
220 function startReplicaProcs() {
221 $wiki = WikiMap
::getWikiIdFromDbDomain( WikiMap
::getCurrentWikiDbDomain() );
223 $cmd = 'php ' . Shell
::escape( __FILE__
);
224 foreach ( self
::$cmdLineOptionMap as $cmdOption => $classOption ) {
225 if ( $cmdOption == 'replica-id' ) {
227 } elseif ( in_array( $cmdOption, self
::$optionsWithArgs ) && isset( $this->$classOption ) ) {
228 $cmd .= " --$cmdOption " . Shell
::escape( $this->$classOption );
229 } elseif ( $this->$classOption ) {
230 $cmd .= " --$cmdOption";
234 ' --wiki ' . Shell
::escape( $wiki ) .
235 ' ' . Shell
::escape( ...$this->destClusters
);
237 $this->replicaPipes
= $this->replicaProcs
= [];
238 for ( $i = 0; $i < $this->numProcs
; $i++
) {
242 [ 'file', 'php://stdout', 'w' ],
243 [ 'file', 'php://stderr', 'w' ]
245 Wikimedia\
suppressWarnings();
246 $proc = proc_open( "$cmd --replica-id $i", $spec, $pipes );
247 Wikimedia\restoreWarnings
();
249 $this->critical( "Error opening replica DB process: $cmd" );
252 $this->replicaProcs
[$i] = $proc;
253 $this->replicaPipes
[$i] = $pipes[0];
255 $this->prevReplicaId
= -1;
259 * Gracefully terminate the child processes
261 function killReplicaProcs() {
262 $this->info( "Waiting for replica DB processes to finish..." );
263 for ( $i = 0; $i < $this->numProcs
; $i++
) {
264 $this->dispatchToReplica( $i, 'quit' );
266 for ( $i = 0; $i < $this->numProcs
; $i++
) {
267 $status = proc_close( $this->replicaProcs
[$i] );
269 $this->critical( "Warning: child #$i exited with status $status" );
272 $this->info( "Done." );
276 * Dispatch a command to the next available replica DB.
277 * This may block until a replica DB finishes its work and becomes available.
279 function dispatch( ...$args ) {
280 $pipes = $this->replicaPipes
;
283 $numPipes = stream_select( $x, $pipes, $y, 3600 );
285 $this->critical( "Error waiting to write to replica DBs. Aborting" );
288 for ( $i = 0; $i < $this->numProcs
; $i++
) {
289 $replicaId = ( $i +
$this->prevReplicaId +
1 ) %
$this->numProcs
;
290 if ( isset( $pipes[$replicaId] ) ) {
291 $this->prevReplicaId
= $replicaId;
292 $this->dispatchToReplica( $replicaId, $args );
297 $this->critical( "Unreachable" );
302 * Dispatch a command to a specified replica DB
303 * @param int $replicaId
304 * @param array|string $args
306 function dispatchToReplica( $replicaId, $args ) {
307 $args = (array)$args;
308 $cmd = implode( ' ', $args );
309 fwrite( $this->replicaPipes
[$replicaId], "$cmd\n" );
313 * Move all tracked pages to the new clusters
315 function doAllPages() {
316 $dbr = wfGetDB( DB_REPLICA
);
319 if ( $this->noCount
) {
320 $numPages = '[unknown]';
322 $numPages = $dbr->selectField( 'blob_tracking',
323 'COUNT(DISTINCT bt_page)',
324 # A condition is required so that this query uses the index
329 if ( $this->copyOnly
) {
330 $this->info( "Copying pages..." );
332 $this->info( "Moving pages..." );
335 $res = $dbr->select( 'blob_tracking',
339 'bt_page > ' . $dbr->addQuotes( $startId )
344 'ORDER BY' => 'bt_page',
345 'LIMIT' => $this->batchSize
,
348 if ( !$res->numRows() ) {
351 foreach ( $res as $row ) {
352 $startId = $row->bt_page
;
353 $this->dispatch( 'doPage', $row->bt_page
);
356 $this->report( 'pages', $i, $numPages );
358 $this->report( 'pages', $i, $numPages );
359 if ( $this->copyOnly
) {
360 $this->info( "All page copies queued." );
362 $this->info( "All page moves queued." );
367 * Display a progress report
368 * @param string $label
369 * @param int $current
372 function report( $label, $current, $end ) {
374 if ( $current == $end ||
$this->numBatches
>= $this->reportingInterval
) {
375 $this->numBatches
= 0;
376 $this->info( "$label: $current / $end" );
377 MediaWikiServices
::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
382 * Move all orphan text to the new clusters
384 function doAllOrphans() {
385 $dbr = wfGetDB( DB_REPLICA
);
388 if ( $this->noCount
) {
389 $numOrphans = '[unknown]';
391 $numOrphans = $dbr->selectField( 'blob_tracking',
392 'COUNT(DISTINCT bt_text_id)',
393 [ 'bt_moved' => 0, 'bt_page' => 0 ],
395 if ( !$numOrphans ) {
399 if ( $this->copyOnly
) {
400 $this->info( "Copying orphans..." );
402 $this->info( "Moving orphans..." );
406 $res = $dbr->select( 'blob_tracking',
411 'bt_text_id > ' . $dbr->addQuotes( $startId )
416 'ORDER BY' => 'bt_text_id',
417 'LIMIT' => $this->batchSize
420 if ( !$res->numRows() ) {
424 foreach ( $res as $row ) {
425 $startId = $row->bt_text_id
;
426 $ids[] = $row->bt_text_id
;
429 // Need to send enough orphan IDs to the child at a time to fill a blob,
430 // so orphanBatchSize needs to be at least ~100.
431 // batchSize can be smaller or larger.
432 while ( count( $ids ) > $this->orphanBatchSize
) {
433 $args = array_slice( $ids, 0, $this->orphanBatchSize
);
434 $ids = array_slice( $ids, $this->orphanBatchSize
);
435 array_unshift( $args, 'doOrphanList' );
436 $this->dispatch( ...$args );
438 if ( count( $ids ) ) {
440 array_unshift( $args, 'doOrphanList' );
441 $this->dispatch( ...$args );
444 $this->report( 'orphans', $i, $numOrphans );
446 $this->report( 'orphans', $i, $numOrphans );
447 $this->info( "All orphans queued." );
451 * Main entry point for worker processes
453 function executeChild() {
454 $this->debug( 'starting' );
457 while ( !feof( STDIN
) ) {
458 $line = rtrim( fgets( STDIN
) );
462 $this->debug( $line );
463 $args = explode( ' ', $line );
464 $cmd = array_shift( $args );
467 $this->doPage( intval( $args[0] ) );
470 $this->doOrphanList( array_map( 'intval', $args ) );
475 MediaWikiServices
::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
480 * Move tracked text in a given page
484 function doPage( $pageId ) {
485 $title = Title
::newFromID( $pageId );
487 $titleText = $title->getPrefixedText();
489 $titleText = '[deleted]';
491 $dbr = wfGetDB( DB_REPLICA
);
493 // Finish any incomplete transactions
494 if ( !$this->copyOnly
) {
495 $this->finishIncompleteMoves( [ 'bt_page' => $pageId ] );
500 $trx = new CgzCopyTransaction( $this, $this->pageBlobClass
);
502 $lbFactory = MediaWikiServices
::getInstance()->getDBLoadBalancerFactory();
505 [ 'blob_tracking', 'text' ],
508 'bt_page' => $pageId,
509 'bt_text_id > ' . $dbr->addQuotes( $startId ),
511 'bt_new_url IS NULL',
516 'ORDER BY' => 'bt_text_id',
517 'LIMIT' => $this->batchSize
520 if ( !$res->numRows() ) {
525 foreach ( $res as $row ) {
526 $startId = $row->bt_text_id
;
527 if ( $lastTextId == $row->bt_text_id
) {
528 // Duplicate (null edit)
531 $lastTextId = $row->bt_text_id
;
533 $text = Revision
::getRevisionText( $row );
534 if ( $text === false ) {
535 $this->critical( "Error loading {$row->bt_rev_id}/{$row->bt_text_id}" );
540 if ( !$trx->addItem( $text, $row->bt_text_id
) ) {
541 $this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
543 $trx = new CgzCopyTransaction( $this, $this->pageBlobClass
);
544 $lbFactory->waitForReplication();
549 $this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
554 * Atomic move operation.
556 * Write the new URL to the text table and set the bt_moved flag.
558 * This is done in a single transaction to provide restartable behavior
561 * The transaction is kept short to reduce locking.
566 function moveTextRow( $textId, $url ) {
567 if ( $this->copyOnly
) {
568 $this->critical( "Internal error: can't call moveTextRow() in --copy-only mode" );
571 $dbw = wfGetDB( DB_MASTER
);
572 $dbw->begin( __METHOD__
);
573 $dbw->update( 'text',
576 'old_flags' => 'external,utf-8',
583 $dbw->update( 'blob_tracking',
585 [ 'bt_text_id' => $textId ],
588 $dbw->commit( __METHOD__
);
592 * Moves are done in two phases: bt_new_url and then bt_moved.
593 * - bt_new_url indicates that the text has been copied to the new cluster.
594 * - bt_moved indicates that the text table has been updated.
596 * This function completes any moves that only have done bt_new_url. This
597 * can happen when the script is interrupted, or when --copy-only is used.
599 * @param array $conds
601 function finishIncompleteMoves( $conds ) {
602 $dbr = wfGetDB( DB_REPLICA
);
603 $lbFactory = MediaWikiServices
::getInstance()->getDBLoadBalancerFactory();
606 $conds = array_merge( $conds, [
608 'bt_new_url IS NOT NULL'
611 $res = $dbr->select( 'blob_tracking',
613 array_merge( $conds, [ 'bt_text_id > ' . $dbr->addQuotes( $startId ) ] ),
616 'ORDER BY' => 'bt_text_id',
617 'LIMIT' => $this->batchSize
,
620 if ( !$res->numRows() ) {
623 $this->debug( 'Incomplete: ' . $res->numRows() . ' rows' );
624 foreach ( $res as $row ) {
625 $startId = $row->bt_text_id
;
626 $this->moveTextRow( $row->bt_text_id
, $row->bt_new_url
);
627 if ( $row->bt_text_id %
10 == 0 ) {
628 $lbFactory->waitForReplication();
635 * Returns the name of the next target cluster
638 function getTargetCluster() {
639 $cluster = next( $this->destClusters
);
640 if ( $cluster === false ) {
641 $cluster = reset( $this->destClusters
);
648 * Gets a DB master connection for the given external cluster name
649 * @param string $cluster
652 function getExtDB( $cluster ) {
653 $lbFactory = MediaWikiServices
::getInstance()->getDBLoadBalancerFactory();
654 $lb = $lbFactory->getExternalLB( $cluster );
656 return $lb->getConnection( DB_MASTER
);
660 * Move an orphan text_id to the new cluster
662 * @param array $textIds
664 function doOrphanList( $textIds ) {
665 // Finish incomplete moves
666 if ( !$this->copyOnly
) {
667 $this->finishIncompleteMoves( [ 'bt_text_id' => $textIds ] );
671 $trx = new CgzCopyTransaction( $this, $this->orphanBlobClass
);
673 $lbFactory = MediaWikiServices
::getInstance()->getDBLoadBalancerFactory();
674 $res = wfGetDB( DB_REPLICA
)->select(
675 [ 'text', 'blob_tracking' ],
676 [ 'old_id', 'old_text', 'old_flags' ],
678 'old_id' => $textIds,
686 foreach ( $res as $row ) {
687 $text = Revision
::getRevisionText( $row );
688 if ( $text === false ) {
689 $this->critical( "Error: cannot load revision text for old_id={$row->old_id}" );
693 if ( !$trx->addItem( $text, $row->old_id
) ) {
694 $this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
696 $trx = new CgzCopyTransaction( $this, $this->orphanBlobClass
);
697 $lbFactory->waitForReplication();
700 $this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
/**
 * Class to represent a recompression operation for a single CGZ blob
 */
class CgzCopyTransaction {
	/** @var RecompressTracked */
	public $parent;
	/** @var string Blob class name to instantiate */
	public $blobClass;
	/** @var ConcatenatedGzipHistoryBlob */
	public $cgz;
	/** @var array Map of text_id => hash within the blob */
	public $referrers;
	/** @var array Map of text_id => text queued in this transaction */
	public $texts;

	/**
	 * Create a transaction from a RecompressTracked object
	 * @param RecompressTracked $parent
	 * @param string $blobClass
	 */
	function __construct( $parent, $blobClass ) {
		$this->blobClass = $blobClass;
		// The blob is created lazily on the first addItem() call.
		$this->cgz = false;
		$this->texts = [];
		$this->parent = $parent;
	}
730 * Returns false if it's ready to commit.
731 * @param string $text
735 function addItem( $text, $textId ) {
737 $class = $this->blobClass
;
738 $this->cgz
= new $class;
740 $hash = $this->cgz
->addItem( $text );
741 $this->referrers
[$textId] = $hash;
742 $this->texts
[$textId] = $text;
744 return $this->cgz
->isHappy();
748 return count( $this->texts
);
752 * Recompress text after some aberrant modification
754 function recompress() {
755 $class = $this->blobClass
;
756 $this->cgz
= new $class;
757 $this->referrers
= [];
758 foreach ( $this->texts
as $textId => $text ) {
759 $hash = $this->cgz
->addItem( $text );
760 $this->referrers
[$textId] = $hash;
766 * Does nothing if no text items have been added.
767 * May skip the move if --copy-only is set.
770 $originalCount = count( $this->texts
);
771 if ( !$originalCount ) {
775 /* Check to see if the target text_ids have been moved already.
777 * We originally read from the replica DB, so this can happen when a single
778 * text_id is shared between multiple pages. It's rare, but possible
779 * if a delete/move/undelete cycle splits up a null edit.
781 * We do a locking read to prevent closer-run race conditions.
783 $dbw = wfGetDB( DB_MASTER
);
784 $dbw->begin( __METHOD__
);
785 $res = $dbw->select( 'blob_tracking',
786 [ 'bt_text_id', 'bt_moved' ],
787 [ 'bt_text_id' => array_keys( $this->referrers
) ],
788 __METHOD__
, [ 'FOR UPDATE' ] );
790 foreach ( $res as $row ) {
791 if ( $row->bt_moved
) {
792 # This row has already been moved, remove it
793 $this->parent
->debug( "TRX: conflict detected in old_id={$row->bt_text_id}" );
794 unset( $this->texts
[$row->bt_text_id
] );
799 // Recompress the blob if necessary
801 if ( !count( $this->texts
) ) {
802 // All have been moved already
803 if ( $originalCount > 1 ) {
804 // This is suspcious, make noise
805 $this->parent
->critical(
806 "Warning: concurrent operation detected, are there two conflicting " .
807 "processes running, doing the same job?" );
815 // Insert the data into the destination cluster
816 $targetCluster = $this->parent
->getTargetCluster();
817 $store = $this->parent
->store
;
818 $targetDB = $store->getMaster( $targetCluster );
819 $targetDB->clearFlag( DBO_TRX
); // we manage the transactions
820 $targetDB->begin( __METHOD__
);
821 $baseUrl = $this->parent
->store
->store( $targetCluster, serialize( $this->cgz
) );
823 // Write the new URLs to the blob_tracking table
824 foreach ( $this->referrers
as $textId => $hash ) {
825 $url = $baseUrl . '/' . $hash;
826 $dbw->update( 'blob_tracking',
827 [ 'bt_new_url' => $url ],
829 'bt_text_id' => $textId,
830 'bt_moved' => 0, # Check for concurrent conflicting update
836 $targetDB->commit( __METHOD__
);
837 // Critical section here: interruption at this point causes blob duplication
838 // Reversing the order of the commits would cause data loss instead
839 $dbw->commit( __METHOD__
);
841 // Write the new URLs to the text table and set the moved flag
842 if ( !$this->parent
->copyOnly
) {
843 foreach ( $this->referrers
as $textId => $hash ) {
844 $url = $baseUrl . '/' . $hash;
845 $this->parent
->moveTextRow( $textId, $url );