Just to isolate the internals. Fix most of the usages in core.
Change-Id: I8b3e9ca1f42b7c49ee57f17b88ca2fc7b404f342
return $this->hasArg( $argId ) ? $this->mArgs[$argId] : $default;
}
+ /**
+ * Returns batch size
+ *
+ * @since 1.31
+ *
+ * @return int|null
+ */
+ protected function getBatchSize() {
+ return $this->mBatchSize;
+ }
+
/**
* Set the batch size.
* @param int $s The number of operations to do in a batch
do {
$res = $dbr->select( $fileQuery['tables'], $fileQuery['fields'],
[ 'img_name > ' . $dbr->addQuotes( $start ) ],
- __METHOD__, [ 'LIMIT' => $this->mBatchSize ], $fileQuery['joins'] );
+ __METHOD__, [ 'LIMIT' => $this->getBatchSize() ], $fileQuery['joins'] );
foreach ( $res as $row ) {
$numImages++;
$start = $row->img_name;
__METHOD__,
[
'ORDER BY' => 'user_id',
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
]
);
$max = $db->selectField( 'ipblocks', 'MAX(ipb_user)' );
// Step 1: Clean up any duplicate user blocks
- for ( $from = 1; $from <= $max; $from += $this->mBatchSize ) {
- $to = min( $max, $from + $this->mBatchSize - 1 );
+ $batchSize = $this->getBatchSize();
+ for ( $from = 1; $from <= $max; $from += $batchSize ) {
+ $to = min( $max, $from + $batchSize - 1 );
$this->output( "Cleaning up duplicate ipb_user ($from-$to of $max)\n" );
$delete = [];
}
// Step 2: Update the user name in any blocks where it doesn't match
- for ( $from = 1; $from <= $max; $from += $this->mBatchSize ) {
- $to = min( $max, $from + $this->mBatchSize - 1 );
+ for ( $from = 1; $from <= $max; $from += $batchSize ) {
+ $to = min( $max, $from + $batchSize - 1 );
$this->output( "Cleaning up mismatched user name ($from-$to of $max)\n" );
$res = $db->select(
__METHOD__,
[
'ORDER BY' => 'page_title',
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
],
[
'category' => [ 'LEFT JOIN', 'page_title = cat_title' ],
__METHOD__,
[
'ORDER BY' => 'cat_title',
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
],
[
'page' => [ 'LEFT JOIN', [
$titleField . $dbr->buildLike( $percent, '_' ),
], LIST_OR ) ],
__METHOD__,
- [ 'LIMIT' => $this->mBatchSize ]
+ [ 'LIMIT' => $this->getBatchSize() ]
);
$this->outputStatus( "Number of invalid rows: " . $res->numRows() . "\n" );
$modDeps = $dbw->tableName( 'module_deps' );
$i = 1;
- foreach ( array_chunk( $rows, $this->mBatchSize ) as $chunk ) {
+ foreach ( array_chunk( $rows, $this->getBatchSize() ) as $chunk ) {
// WHERE ( mod=A AND skin=A ) OR ( mod=A AND skin=B) ..
$conds = array_map( function ( stdClass $row ) use ( $dbw ) {
return $dbw->makeList( (array)$row, IDatabase::LIST_AND );
foreach ( $iterator as $file ) {
if ( wfTimestamp( TS_UNIX, $tempRepo->getFileTimestamp( "$dir/$file" ) ) < $cutoff ) {
$batch[] = [ 'op' => 'delete', 'src' => "$dir/$file" ];
- if ( count( $batch ) >= $this->mBatchSize ) {
+ if ( count( $batch ) >= $this->getBatchSize() ) {
$this->doOperations( $tempRepo, $batch );
$i += count( $batch );
$batch = [];
foreach ( $iterator as $file ) {
if ( wfTimestamp( TS_UNIX, $tempRepo->getFileTimestamp( "$dir/$file" ) ) < $cutoff ) {
$batch[] = [ 'op' => 'delete', 'src' => "$dir/$file" ];
- if ( count( $batch ) >= $this->mBatchSize ) {
+ if ( count( $batch ) >= $this->getBatchSize() ) {
$this->doOperations( $tempRepo, $batch );
$i += count( $batch );
$batch = [];
__METHOD__,
[
'ORDER BY' => 'user_id',
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
]
);
$id = $this->convertOptionBatch( $res, $dbw );
// Check up on the rate file periodically to adjust the concurrency
if ( $rateFile && ( !$count || ( $count % 500 ) == 0 ) ) {
$this->setBatchSize( max( 1, (int)file_get_contents( $rateFile ) ) );
- $this->output( "\tBatch size is now {$this->mBatchSize}.\n" );
+ $this->output( "\tBatch size is now {$this->getBatchSize()}.\n" );
}
$batchPaths[$srcPathRel] = 1; // remove duplicates
- if ( count( $batchPaths ) >= $this->mBatchSize ) {
+ if ( count( $batchPaths ) >= $this->getBatchSize() ) {
$this->copyFileBatch( array_keys( $batchPaths ), $backendRel, $src, $dst );
$batchPaths = []; // done
}
// Check up on the rate file periodically to adjust the concurrency
if ( $rateFile && ( !$count || ( $count % 500 ) == 0 ) ) {
$this->setBatchSize( max( 1, (int)file_get_contents( $rateFile ) ) );
- $this->output( "\tBatch size is now {$this->mBatchSize}.\n" );
+ $this->output( "\tBatch size is now {$this->getBatchSize()}.\n" );
}
$batchPaths[$delPathRel] = 1; // remove duplicates
- if ( count( $batchPaths ) >= $this->mBatchSize ) {
+ if ( count( $batchPaths ) >= $this->getBatchSize() ) {
$this->delFileBatch( array_keys( $batchPaths ), $backendRel, $dst );
$batchPaths = []; // done
}
foreach ( $jobs as $job ) {
++$total;
$batch[] = $job;
- if ( count( $batch ) >= $this->mBatchSize ) {
+ if ( count( $batch ) >= $this->getBatchSize() ) {
$dst->push( $batch );
$totalOK += count( $batch );
$batch = [];
wfWaitForSlaves();
$this->commitTransaction( $db, __METHOD__ );
$q = $db->limitResult( "DELETE /* deleteSelfExternals */ FROM externallinks WHERE el_to"
- . $db->buildLike( $wgServer . '/', $db->anyString() ), $this->mBatchSize );
+ . $db->buildLike( $wgServer . '/', $db->anyString() ), $this->getBatchSize() );
$this->output( "Deleting a batch\n" );
$db->query( $q );
if ( !$db->affectedRows() ) {
$dbr,
'page',
[ 'page_title' ],
- $this->mBatchSize
+ $this->getBatchSize()
);
$it->addConditions( [
'page_namespace' => NS_CATEGORY,
$dbr,
'categorylinks',
[ 'cl_from', 'cl_to' ],
- $this->mBatchSize
+ $this->getBatchSize()
);
$it->addConditions( [
'cl_type' => 'subcat',
$repo = RepoGroup::singleton()->getLocalRepo();
$dbr = $repo->getReplicaDB();
$be = $repo->getBackend();
+ $batchSize = $this->getBatchSize();
$mtime1 = $dbr->timestampOrNull( $this->getOption( 'mtimeafter', null ) );
$mtime2 = $dbr->timestampOrNull( $this->getOption( 'mtimebefore', null ) );
__METHOD__,
// DISTINCT causes a pointless filesort
[ 'ORDER BY' => 'name', 'GROUP BY' => 'name',
- 'LIMIT' => $this->mBatchSize ],
+ 'LIMIT' => $batchSize ],
$joinConds
);
$checkPaths[] = $file->getPath();
}
- foreach ( array_chunk( $checkPaths, $this->mBatchSize ) as $paths ) {
+ foreach ( array_chunk( $checkPaths, $batchSize ) as $paths ) {
$be->preloadFileStat( [ 'srcs' => $paths ] );
foreach ( $paths as $path ) {
if ( $be->fileExists( [ 'src' => $path ] ) === false ) {
}
}
}
- } while ( $res->numRows() >= $this->mBatchSize );
+ } while ( $res->numRows() >= $batchSize );
}
}
}
$pathBatch[] = $path;
- if ( count( $pathBatch ) >= $this->mBatchSize ) {
+ if ( count( $pathBatch ) >= $this->getBatchSize() ) {
$this->checkFiles( $repo, $pathBatch, $verbose );
$pathBatch = [];
}
'page_id > ' . $dbr->addQuotes( $lastPage )
],
__METHOD__,
- [ 'ORDER BY' => 'page_id', 'LIMIT' => $this->mBatchSize ]
+ [ 'ORDER BY' => 'page_id', 'LIMIT' => $this->getBatchSize() ]
);
foreach ( $rows as $row ) {
$this->handleRow( $row );
}
- } while ( $rows->numRows() >= $this->mBatchSize );
+ } while ( $rows->numRows() >= $this->getBatchSize() );
}
return true;
],
__METHOD__,
[
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
'ORDER BY' => 'user_id',
]
);
$this->output( "Waiting for replica DBs..." );
wfWaitForSlaves();
$this->output( " done.\n" );
- } while ( $res->numRows() >= $this->mBatchSize );
+ } while ( $res->numRows() >= $this->getBatchSize() );
}
}
$dbr,
'user_properties',
[ 'up_user', 'up_property' ],
- $this->mBatchSize
+ $this->getBatchSize()
);
$iterator->setFetchColumns( [ 'up_user', 'up_value' ] );
$iterator->addConditions( [
. str_replace( [ "\r", "\n" ], ' ', $e->getMessage() ) . "\n" );
}
- if ( $i % $this->mBatchSize ) {
+ if ( $i % $this->getBatchSize() ) {
$lbFactory->waitForReplication();
}
}
$page->doEditContent( $content, $summary, 0, false, $user );
$this->output( "Edited $title\n" );
- if ( $i && ( $i % $this->mBatchSize ) == 0 ) {
+ if ( $i && ( $i % $this->getBatchSize() ) == 0 ) {
wfWaitForSlaves();
}
}
$queue->push( $job );
++$count;
- if ( ( $count % $this->mBatchSize ) == 0 ) {
+ if ( ( $count % $this->getBatchSize() ) == 0 ) {
$queue->waitForBackups();
}
}
__METHOD__,
[
'ORDER BY' => $primaryKey,
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
]
);
if ( !$res->numRows() ) {
__METHOD__,
[
'ORDER BY' => $primaryKey,
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
],
[ $newTable => [ 'LEFT JOIN', "{$primaryKey}={$newPrimaryKey}" ] ]
);
$conds[] = 'img_timestamp >= ' . $dbw->addQuotes( $dbw->timestamp( $since ) );
}
+ $batchSize = $this->getBatchSize();
$batch = [];
$lastName = '';
do {
[ 'img_name', 'img_sha1' ],
array_merge( [ 'img_name > ' . $dbw->addQuotes( $lastName ) ], $conds ),
__METHOD__,
- [ 'LIMIT' => $this->mBatchSize, 'ORDER BY' => 'img_name' ]
+ [ 'LIMIT' => $batchSize, 'ORDER BY' => 'img_name' ]
);
foreach ( $res as $row ) {
'src' => $spath, 'dst' => $dpath, 'img' => $ofile->getArchiveName() ];
}
- if ( count( $batch ) >= $this->mBatchSize ) {
+ if ( count( $batch ) >= $batchSize ) {
$this->runBatch( $batch, $be );
$batch = [];
}
$res = $dbw->select( 'filearchive', [ 'fa_storage_key', 'fa_id', 'fa_name' ],
array_merge( [ 'fa_id > ' . $dbw->addQuotes( $lastId ) ], $conds ),
__METHOD__,
- [ 'LIMIT' => $this->mBatchSize, 'ORDER BY' => 'fa_id' ]
+ [ 'LIMIT' => $batchSize, 'ORDER BY' => 'fa_id' ]
);
foreach ( $res as $row ) {
$batch[] = [ 'op' => 'copy', 'src' => $spath, 'dst' => $dpath,
'overwriteSame' => true, 'img' => "(ID {$row->fa_id}) {$row->fa_name}" ];
- if ( count( $batch ) >= $this->mBatchSize ) {
+ if ( count( $batch ) >= $batchSize ) {
$this->runBatch( $batch, $be );
$batch = [];
}
$oldGroup = $this->getArg( 0 );
$newGroup = $this->getArg( 1 );
$dbw = $this->getDB( DB_MASTER );
+ $batchSize = $this->getBatchSize();
$start = $dbw->selectField( 'user_groups', 'MIN(ug_user)',
[ 'ug_group' => $oldGroup ], __FUNCTION__ );
$end = $dbw->selectField( 'user_groups', 'MAX(ug_user)',
$this->error( "Nothing to do - no users in the '$oldGroup' group", true );
}
# Do remaining chunk
- $end += $this->mBatchSize - 1;
+ $end += $batchSize - 1;
$blockStart = $start;
- $blockEnd = $start + $this->mBatchSize - 1;
+ $blockEnd = $start + $batchSize - 1;
// Migrate users over in batches...
while ( $blockEnd <= $end ) {
$affected = 0;
}
$count += $affected;
- $blockStart += $this->mBatchSize;
- $blockEnd += $this->mBatchSize;
+ $blockStart += $batchSize;
+ $blockEnd += $batchSize;
wfWaitForSlaves();
}
$this->output( "Done! $count users in group '$oldGroup' are now in '$newGroup' instead.\n" );
return false;
}
$end = $db->selectField( 'page', 'MAX(page_id)', false, __METHOD__ );
+ $batchSize = $this->getBatchSize();
# Do remaining chunk
- $end += $this->mBatchSize - 1;
+ $end += $batchSize - 1;
$blockStart = $start;
- $blockEnd = $start + $this->mBatchSize - 1;
+ $blockEnd = $start + $batchSize - 1;
while ( $blockEnd <= $end ) {
$this->output( "...doing page_id from $blockStart to $blockEnd\n" );
$cond = "page_id BETWEEN $blockStart AND $blockEnd";
__METHOD__
);
}
- $blockStart += $this->mBatchSize - 1;
- $blockEnd += $this->mBatchSize - 1;
+ $blockStart += $batchSize - 1;
+ $blockEnd += $batchSize - 1;
wfWaitForSlaves();
}
return true;
$toSave = [];
$lastId = 0;
$nsCondition = $ns === 'all' ? [] : [ 'page_namespace' => $ns ];
+ $batchSize = $this->getBatchSize();
do {
$rows = $dbw->select(
'page',
'page_id > ' . $dbw->addQuotes( $lastId ),
] + $nsCondition,
__METHOD__,
- [ 'LIMIT' => $this->mBatchSize, 'ORDER BY' => 'page_id ASC' ]
+ [ 'LIMIT' => $batchSize, 'ORDER BY' => 'page_id ASC' ]
);
$this->output( "Fetched {$rows->numRows()} rows.\n" );
foreach ( $rows as $row ) {
$title = Title::newFromRow( $row );
$model = ContentHandler::getDefaultModelFor( $title );
$toSave[$model][] = $row->page_id;
- if ( count( $toSave[$model] ) >= $this->mBatchSize ) {
+ if ( count( $toSave[$model] ) >= $batchSize ) {
$this->updatePageRows( $dbw, $toSave[$model], $model );
unset( $toSave[$model] );
}
$lastId = $row->page_id;
}
- } while ( $rows->numRows() >= $this->mBatchSize );
+ } while ( $rows->numRows() >= $batchSize );
foreach ( $toSave as $model => $pages ) {
$this->updatePageRows( $dbw, $pages, $model );
}
$toSave = [];
$idsToClear = [];
$lastId = 0;
+ $batchSize = $this->getBatchSize();
do {
$rows = $dbw->select(
$selectTables,
"$key > " . $dbw->addQuotes( $lastId ),
] + $where,
__METHOD__,
- [ 'LIMIT' => $this->mBatchSize, 'ORDER BY' => "$key ASC" ],
+ [ 'LIMIT' => $batchSize, 'ORDER BY' => "$key ASC" ],
$join_conds
);
$this->output( "Fetched {$rows->numRows()} rows.\n" );
}
}
- if ( count( $toSave[$defaultModel] ) >= $this->mBatchSize ) {
+ if ( count( $toSave[$defaultModel] ) >= $batchSize ) {
$this->updateRevisionOrArchiveRows( $dbw, $toSave[$defaultModel], $defaultModel, $table );
unset( $toSave[$defaultModel] );
}
}
- } while ( $rows->numRows() >= $this->mBatchSize );
+ } while ( $rows->numRows() >= $batchSize );
foreach ( $toSave as $model => $ids ) {
$this->updateRevisionOrArchiveRows( $dbw, $ids, $model, $table );
}
$this->output( "Populating fa_sha1 field from fa_storage_key\n" );
$endId = $dbw->selectField( $table, 'MAX(fa_id)', false, __METHOD__ );
- $batchSize = $this->mBatchSize;
+ $batchSize = $this->getBatchSize();
$done = 0;
do {
$numRows = $res->numRows();
$i = 0;
foreach ( $res as $row ) {
- if ( $i % $this->mBatchSize == 0 ) {
+ if ( $i % $this->getBatchSize() == 0 ) {
$this->output( sprintf(
"Done %d of %d, %5.3f%% \r", $i, $numRows, $i / $numRows * 100 ) );
wfWaitForSlaves();
$this->output( "Copying IP revisions to ip_changes, from rev_id $start to rev_id $end\n" );
while ( $blockStart <= $end ) {
- $blockEnd = min( $blockStart + $this->mBatchSize, $end );
+ $blockEnd = min( $blockStart + $this->getBatchSize(), $end );
$rows = $dbr->select(
'revision',
[ 'rev_id', 'rev_timestamp', 'rev_user_text' ],
}
protected function doDBUpdates() {
+ $batchSize = $this->getBatchSize();
$db = $this->getDB( DB_MASTER );
if ( !$db->tableExists( 'log_search' ) ) {
$this->error( "log_search does not exist" );
$end = $db->selectField( 'logging', 'MAX(log_id)', false, __FUNCTION__ );
# Do remaining chunk
- $end += $this->mBatchSize - 1;
+ $end += $batchSize - 1;
$blockStart = $start;
- $blockEnd = $start + $this->mBatchSize - 1;
+ $blockEnd = $start + $batchSize - 1;
$delTypes = [ 'delete', 'suppress' ]; // revisiondelete types
while ( $blockEnd <= $end ) {
$log->addRelations( 'target_author_ip', $userIPs, $row->log_id );
}
}
- $blockStart += $this->mBatchSize;
- $blockEnd += $this->mBatchSize;
+ $blockStart += $batchSize;
+ $blockEnd += $batchSize;
wfWaitForSlaves();
}
$this->output( "Done populating log_search table.\n" );
}
protected function doDBUpdates() {
+ $batchSize = $this->getBatchSize();
$db = $this->getDB( DB_MASTER );
$start = $db->selectField( 'logging', 'MIN(log_id)', false, __METHOD__ );
if ( !$start ) {
$end = $db->selectField( 'logging', 'MAX(log_id)', false, __METHOD__ );
# Do remaining chunk
- $end += $this->mBatchSize - 1;
+ $end += $batchSize - 1;
$blockStart = $start;
- $blockEnd = $start + $this->mBatchSize - 1;
+ $blockEnd = $start + $batchSize - 1;
while ( $blockEnd <= $end ) {
$this->output( "...doing log_id from $blockStart to $blockEnd\n" );
$cond = "log_id BETWEEN $blockStart AND $blockEnd AND log_user = user_id";
[ 'log_id' => $row->log_id ], __METHOD__ );
}
$this->commitTransaction( $db, __METHOD__ );
- $blockStart += $this->mBatchSize;
- $blockEnd += $this->mBatchSize;
+ $blockStart += $batchSize;
+ $blockEnd += $batchSize;
wfWaitForSlaves();
}
$this->output( "Done populating log_user_text field.\n" );
__METHOD__,
[
'ORDER BY' => 'pp_page, pp_propname',
- 'LIMIT' => $this->mBatchSize
+ 'LIMIT' => $this->getBatchSize()
]
);
}
protected function doDBUpdates() {
+ $batchSize = $this->getBatchSize();
$db = $this->getDB( DB_MASTER );
if ( !$db->tableExists( 'revision' ) ) {
$this->error( "revision table does not exist" );
}
# Do remaining chunk
$blockStart = intval( $start );
- $blockEnd = intval( $start ) + $this->mBatchSize - 1;
+ $blockEnd = intval( $start ) + $batchSize - 1;
$count = 0;
$changed = 0;
while ( $blockStart <= $end ) {
__METHOD__ );
$count++;
}
- $blockStart += $this->mBatchSize;
- $blockEnd += $this->mBatchSize;
+ $blockStart += $batchSize;
+ $blockEnd += $batchSize;
wfWaitForSlaves();
}
$this->output( "rev_parent_id population complete ... {$count} rows [{$changed} changed]\n" );
$this->verbose( "Purged file {$row->log_title}; {$type} @{$row->log_timestamp}.\n" );
- if ( $this->hasOption( 'sleep-per-batch' ) && ++$bSize > $this->mBatchSize ) {
+ if ( $this->hasOption( 'sleep-per-batch' ) && ++$bSize > $this->getBatchSize() ) {
$bSize = 0;
// sleep-per-batch is milliseconds, usleep wants micro seconds.
usleep( 1000 * (int)$this->getOption( 'sleep-per-batch' ) );
$stuckCount = 0; // loop breaker
while ( true ) {
// Adjust bach size if we are stuck in a second that had many changes
- $bSize = $this->mBatchSize + ( $stuckCount * $this->mBatchSize );
+ $bSize = ( $stuckCount + 1 ) * $this->getBatchSize();
$res = $dbr->select(
[ 'page', 'revision' ],
$conds + [ 'page_id > ' . $dbr->addQuotes( $startId ) ],
__METHOD__,
[
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
'ORDER BY' => 'page_id'
]
$modDeps = $dbw->tableName( 'module_deps' );
$i = 1;
- foreach ( array_chunk( $rows, $this->mBatchSize ) as $chunk ) {
+ foreach ( array_chunk( $rows, $this->getBatchSize() ) as $chunk ) {
// WHERE ( mod=A AND skin=A ) OR ( mod=A AND skin=B) ..
$conds = array_map( function ( stdClass $row ) use ( $dbw ) {
return $dbw->makeList( (array)$row, IDatabase::LIST_AND );
$lastOldId = intval( $obj->rc_this_oldid );
$lastSize = $size;
- if ( ( ++$updated % $this->mBatchSize ) == 0 ) {
+ if ( ( ++$updated % $this->getBatchSize() ) == 0 ) {
wfGetLBFactory()->waitForReplication();
}
}
__METHOD__
);
- if ( ( ++$inserted % $this->mBatchSize ) == 0 ) {
+ if ( ( ++$inserted % $this->getBatchSize() ) == 0 ) {
wfGetLBFactory()->waitForReplication();
}
}
__METHOD__
);
- foreach ( array_chunk( $rcids, $this->mBatchSize ) as $rcidBatch ) {
+ foreach ( array_chunk( $rcids, $this->getBatchSize() ) as $rcidBatch ) {
$dbw->update(
'recentchanges',
[ 'rc_bot' => 1 ],
__METHOD__
);
- if ( ( ++$updates % $this->mBatchSize ) == 0 ) {
+ if ( ( ++$updates % $this->getBatchSize() ) == 0 ) {
wfGetLBFactory()->waitForReplication();
}
}
}
protected function doWork() {
- $this->output( "Finding up to {$this->mBatchSize} drifted rows " .
- "starting at cat_id {$this->minimumId}...\n" );
+ $this->output( "Finding up to {$this->getBatchSize()} drifted rows " .
+ "starting at cat_id {$this->minimumId}...\n" );
$countingConds = [ 'cl_to = cat_title' ];
if ( $this->mode === 'subcats' ) {
"cat_{$this->mode} != ($countingSubquery)"
],
__METHOD__,
- [ 'LIMIT' => $this->mBatchSize ]
+ [ 'LIMIT' => $this->getBatchSize() ]
);
if ( !$idsToUpdate ) {
return false;
$conds[] = "img_minor_mime = {$dbr->addQuotes( $minor_mime )}";
}
- $res = $dbr->select( $fileQuery['tables'], $fileQuery['fields'], $conds,
- __METHOD__, [ 'LIMIT' => $this->mBatchSize, 'ORDER BY' => 'img_name ASC' ], $fileQuery['joins']
+ $res = $dbr->select( $fileQuery['tables'],
+ $fileQuery['fields'],
+ $conds,
+ __METHOD__,
+ [
+ 'LIMIT' => $this->getBatchSize(),
+ 'ORDER BY' => 'img_name ASC'
+ ],
+ $fileQuery['joins']
);
if ( $res->numRows() > 0 ) {
$this->output( "Updating headers for {$backendOperationsCount} file(s).\n" );
$this->updateFileHeaders( $repo, $backendOperations );
- } while ( $res->numRows() === $this->mBatchSize );
+ } while ( $res->numRows() === $this->getBatchSize() );
$this->output( "Done. Updated headers for $count file(s).\n" );
}
'user_email_authenticated IS NULL'
],
__METHOD__,
- [ 'LIMIT' => $this->mBatchSize ]
+ [ 'LIMIT' => $this->getBatchSize() ]
);
$count = $rows->numRows();
$badIds = [];
$maxid = $dbr->selectField( 'user', 'MAX(user_id)', [], __METHOD__ );
$min = 0;
- $max = $this->mBatchSize;
+ $max = $this->getBatchSize();
do {
$result = $dbr->select( 'user',
}
$min = $max;
- $max = $min + $this->mBatchSize;
+ $max = $min + $this->getBatchSize();
wfWaitForSlaves();
} while ( $min <= $maxid );
$next = null;
do {
- $limit = min( $this->mBatchSize, $end - $start + 1 ); // don't go pass ending ID
+ $limit = min( $this->getBatchSize(), $end - $start + 1 ); // don't go past the ending ID
$this->output( "Doing id $start to " . ( $start + $limit - 1 ) . "...\n" );
$entries = $src->getJournal()->getChangeEntries( $start, $limit, $next );
__METHOD__,
[
'ORDER BY' => 'user_id',
- 'LIMIT' => $this->mBatchSize,
+ 'LIMIT' => $this->getBatchSize(),
'LOCK IN SHARE MODE',
]
);