scenarios:
Load a page:
# The only page that exists by default is the main page.
- # But, its actual name is configurable/unknown (T216791).
- # Omit 'title' to let MediaWiki show the defaul (which is the main page),
+ # But its actual name is configurable/unknown (T216791).
+ # Omit 'title' to let MediaWiki show the default (which is the main page),
# and a query string to prevent a normalization redirect.
url: "{MW_SERVER}{MW_SCRIPT_PATH}/index.php?noredirectplz"
viewport:
'JavaScriptMinifier' => __DIR__ . '/includes/libs/JavaScriptMinifier.php',
'Job' => __DIR__ . '/includes/jobqueue/Job.php',
'JobQueue' => __DIR__ . '/includes/jobqueue/JobQueue.php',
- 'JobQueueAggregator' => __DIR__ . '/includes/jobqueue/aggregator/JobQueueAggregator.php',
- 'JobQueueAggregatorNull' => __DIR__ . '/includes/jobqueue/aggregator/JobQueueAggregatorNull.php',
- 'JobQueueAggregatorRedis' => __DIR__ . '/includes/jobqueue/aggregator/JobQueueAggregatorRedis.php',
'JobQueueConnectionError' => __DIR__ . '/includes/jobqueue/exception/JobQueueConnectionError.php',
'JobQueueDB' => __DIR__ . '/includes/jobqueue/JobQueueDB.php',
'JobQueueEnqueueUpdate' => __DIR__ . '/includes/deferred/JobQueueEnqueueUpdate.php',
'default' => [ 'class' => JobQueueDB::class, 'order' => 'random', 'claimTTL' => 3600 ],
];
-/**
- * Which aggregator to use for tracking which queues have jobs.
- * These settings should be global to all wikis.
- */
-$wgJobQueueAggregator = [
- 'class' => JobQueueAggregatorNull::class
-];
-
/**
* Whether to include the number of jobs that are queued
* for the API's maxlag parameter.
},
'LinkRenderer' => function ( MediaWikiServices $services ) : LinkRenderer {
- global $wgUser;
-
if ( defined( 'MW_NO_SESSION' ) ) {
return $services->getLinkRendererFactory()->create();
} else {
- return $services->getLinkRendererFactory()->createForUser( $wgUser );
+ return $services->getLinkRendererFactory()->createForUser(
+ RequestContext::getMain()->getUser()
+ );
}
},
$status->merge( $this->makeStubDBFile( $dir, $db ) );
$status->merge( $this->makeStubDBFile( $dir, "wikicache" ) );
$status->merge( $this->makeStubDBFile( $dir, "{$db}_l10n_cache" ) );
+ $status->merge( $this->makeStubDBFile( $dir, "{$db}_jobqueue" ) );
if ( !$status->isOK() ) {
return $status;
}
return Status::newFatal( 'config-sqlite-connection-error', $e->getMessage() );
}
+ # Create the job queue DB
+ try {
+ $conn = Database::factory(
+ 'sqlite', [ 'dbname' => "{$db}_jobqueue", 'dbDirectory' => $dir ] );
+			# @todo: avoid duplicating the "job" table definition here, though it is very static
+ $sql =
+<<<EOT
+ CREATE TABLE job (
+ job_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+ job_cmd BLOB NOT NULL default '',
+ job_namespace INTEGER NOT NULL,
+ job_title TEXT NOT NULL,
+ job_timestamp BLOB NULL default NULL,
+ job_params BLOB NOT NULL,
+ job_random integer NOT NULL default 0,
+ job_attempts integer NOT NULL default 0,
+ job_token BLOB NOT NULL default '',
+ job_token_timestamp BLOB NULL default NULL,
+ job_sha1 BLOB NOT NULL default ''
+ );
+ CREATE INDEX job_sha1 ON job (job_sha1);
+ CREATE INDEX job_cmd_token ON job (job_cmd,job_token,job_random);
+ CREATE INDEX job_cmd_token_id ON job (job_cmd,job_token,job_id);
+ CREATE INDEX job_cmd ON job (job_cmd, job_namespace, job_title, job_params);
+ CREATE INDEX job_timestamp ON job (job_timestamp);
+EOT;
+ $conn->query( $sql );
+ $conn->query( "PRAGMA journal_mode=WAL" ); // this is permanent
+ $conn->close();
+ } catch ( DBConnectionError $e ) {
+ return Status::newFatal( 'config-sqlite-connection-error', $e->getMessage() );
+ }
+
# Open the main DB
return $this->getConnection();
}
*/
public function getLocalSettings() {
$dir = LocalSettingsGenerator::escapePhpString( $this->getVar( 'wgSQLiteDataDir' ) );
-
+	// These tables have frequent writes and are thus split off from the main one.
+	// Since the code using these tables only uses transactions for writes, set
+	// them to use BEGIN IMMEDIATE. This avoids frequent lock errors on first write.
return "# SQLite-specific settings
\$wgSQLiteDataDir = \"{$dir}\";
\$wgObjectCaches[CACHE_DB] = [
'type' => 'sqlite',
'dbname' => 'wikicache',
'tablePrefix' => '',
+ 'variables' => [ 'synchronous' => 'NORMAL' ],
'dbDirectory' => \$wgSQLiteDataDir,
+ 'trxMode' => 'IMMEDIATE',
'flags' => 0
]
];
'type' => 'sqlite',
'dbname' => \"{\$wgDBname}_l10n_cache\",
'tablePrefix' => '',
+ 'variables' => [ 'synchronous' => 'NORMAL' ],
'dbDirectory' => \$wgSQLiteDataDir,
+ 'trxMode' => 'IMMEDIATE',
'flags' => 0
+];
+\$wgJobTypeConf['default'] = [
+ 'class' => 'JobQueueDB',
+ 'claimTTL' => 3600,
+ 'server' => [
+ 'type' => 'sqlite',
+ 'dbname' => \"{\$wgDBname}_jobqueue\",
+ 'tablePrefix' => '',
+ 'dbDirectory' => \$wgSQLiteDataDir,
+ 'trxMode' => 'IMMEDIATE',
+ 'flags' => 0
+ ]
];";
}
}
return $this->params;
}
+ /**
+ * @param string|null $field Metadata field or null to get all the metadata
+ * @return mixed|null Value; null if missing
+ * @since 1.33
+ */
+ public function getMetadata( $field = null ) {
+ if ( $field === null ) {
+ return $this->metadata;
+ }
+
+ return $this->metadata[$field] ?? null;
+ }
+
+ /**
+ * @param string $field Key name to set the value for
+ * @param mixed $value The value to set the field for
+ * @return mixed|null The prior field value; null if missing
+ * @since 1.33
+ */
+ public function setMetadata( $field, $value ) {
+ $old = $this->getMetadata( $field );
+ if ( $value === null ) {
+ unset( $this->metadata[$field] );
+ } else {
+ $this->metadata[$field] = $value;
+ }
+
+ return $old;
+ }
+
/**
* @return int|null UNIX timestamp to delay running this job until, otherwise null
* @since 1.22
/** @var BagOStuff */
protected $dupCache;
- /** @var JobQueueAggregator */
- protected $aggr;
const QOS_ATOMIC = 1; // integer; "all-or-nothing" job insertions
throw new JobQueueError( __CLASS__ . " does not support '{$this->order}' order." );
}
$this->dupCache = wfGetCache( CACHE_ANYTHING );
- $this->aggr = $params['aggregator'] ?? new JobQueueAggregatorNull( [] );
$this->readOnlyReason = $params['readOnlyReason'] ?? false;
}
}
$this->doBatchPush( $jobs, $flags );
- $this->aggr->notifyQueueNonEmpty( $this->domain, $this->type );
foreach ( $jobs as $job ) {
if ( $job->isRootJob() ) {
$job = $this->doPop();
- if ( !$job ) {
- $this->aggr->notifyQueueEmpty( $this->domain, $this->type );
- }
-
// Flag this job as an old duplicate based on its "root" job...
try {
if ( $job && $this->isRootJobOldDuplicate( $job ) ) {
$title = Title::makeTitle( $row->job_namespace, $row->job_title );
$job = Job::factory( $row->job_cmd, $title,
self::extractBlob( $row->job_params ) );
- $job->metadata['id'] = $row->job_id;
- $job->metadata['timestamp'] = $row->job_timestamp;
+ $job->setMetadata( 'id', $row->job_id );
+ $job->setMetadata( 'timestamp', $row->job_timestamp );
break; // done
} while ( true );
* @throws MWException
*/
protected function doAck( Job $job ) {
- if ( !isset( $job->metadata['id'] ) ) {
+ $id = $job->getMetadata( 'id' );
+ if ( $id === null ) {
throw new MWException( "Job of type '{$job->getType()}' has no ID." );
}
$scope = $this->getScopedNoTrxFlag( $dbw );
try {
// Delete a row with a single DELETE without holding row locks over RTTs...
- $dbw->delete( 'job',
- [ 'job_cmd' => $this->type, 'job_id' => $job->metadata['id'] ], __METHOD__ );
+ $dbw->delete(
+ 'job',
+ [ 'job_cmd' => $this->type, 'job_id' => $id ],
+ __METHOD__
+ );
JobQueue::incrStats( 'acks', $this->type );
} catch ( DBError $e ) {
Title::makeTitle( $row->job_namespace, $row->job_title ),
strlen( $row->job_params ) ? unserialize( $row->job_params ) : []
);
- $job->metadata['id'] = $row->job_id;
- $job->metadata['timestamp'] = $row->job_timestamp;
+ $job->setMetadata( 'id', $row->job_id );
+ $job->setMetadata( 'timestamp', $row->job_timestamp );
return $job;
}
$affected = $dbw->affectedRows();
$count += $affected;
JobQueue::incrStats( 'recycles', $this->type, $affected );
- $this->aggr->notifyQueueNonEmpty( $this->domain, $this->type );
}
}
$job = false;
}
if ( $job ) {
- $job->metadata['QueuePartition'] = $partition;
+ $job->setMetadata( 'QueuePartition', $partition );
return $job;
} else {
}
protected function doAck( Job $job ) {
- if ( !isset( $job->metadata['QueuePartition'] ) ) {
+ $partition = $job->getMetadata( 'QueuePartition' );
+ if ( $partition === null ) {
throw new MWException( "The given job has no defined partition name." );
}
- $this->partitionQueues[$job->metadata['QueuePartition']]->ack( $job );
+ $this->partitionQueues[$partition]->ack( $job );
}
protected function doIsRootJobOldDuplicate( Job $job ) {
} else {
$conf = $conf + $wgJobTypeConf['default'];
}
- $conf['aggregator'] = JobQueueAggregator::singleton();
if ( !isset( $conf['readOnlyReason'] ) ) {
$conf['readOnlyReason'] = $this->readOnlyReason;
}
$job = $this->jobFromSpecInternal( $spec );
end( $claimed );
- $job->metadata['claimId'] = key( $claimed );
+ $job->setMetadata( 'claimId', key( $claimed ) );
return $job;
}
}
$claimed =& $this->getQueueData( 'claimed' );
- unset( $claimed[$job->metadata['claimId']] );
+ $job->setMetadata( 'claimId', null );
}
/**
* @throws JobQueueError
*/
protected function doAck( Job $job ) {
- if ( !isset( $job->metadata['uuid'] ) ) {
+ $uuid = $job->getMetadata( 'uuid' );
+ if ( $uuid === null ) {
throw new UnexpectedValueException( "Job of type '{$job->getType()}' has no UUID." );
}
- $uuid = $job->metadata['uuid'];
$conn = $this->getConnection();
try {
static $script =
}
$title = Title::makeTitle( $item['namespace'], $item['title'] );
$job = Job::factory( $item['type'], $title, $item['params'] );
- $job->metadata['uuid'] = $item['uuid'];
- $job->metadata['timestamp'] = $item['timestamp'];
+ $job->setMetadata( 'uuid', $item['uuid'] );
+ $job->setMetadata( 'timestamp', $item['timestamp'] );
// Add in attempt count for debugging at showJobs.php
- $job->metadata['attempts'] = $conn->hGet( $this->getQueueKey( 'h-attempts' ), $uid );
+ $job->setMetadata( 'attempts',
+ $conn->hGet( $this->getQueueKey( 'h-attempts' ), $uid ) );
return $job;
} catch ( RedisException $e ) {
protected function getJobFromFields( array $fields ) {
$title = Title::makeTitle( $fields['namespace'], $fields['title'] );
$job = Job::factory( $fields['type'], $title, $fields['params'] );
- $job->metadata['uuid'] = $fields['uuid'];
- $job->metadata['timestamp'] = $fields['timestamp'];
+ $job->setMetadata( 'uuid', $fields['uuid'] );
+ $job->setMetadata( 'timestamp', $fields['timestamp'] );
return $job;
}
+++ /dev/null
-<?php
-/**
- * Job queue aggregator code.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- * http://www.gnu.org/copyleft/gpl.html
- *
- * @file
- */
-
-/**
- * Class to handle tracking information about all queues
- *
- * @ingroup JobQueue
- * @since 1.21
- */
-abstract class JobQueueAggregator {
- /** @var JobQueueAggregator */
- protected static $instance = null;
-
- /**
- * @param array $params
- */
- public function __construct( array $params ) {
- }
-
- /**
- * @throws MWException
- * @return JobQueueAggregator
- */
- final public static function singleton() {
- global $wgJobQueueAggregator;
-
- if ( !isset( self::$instance ) ) {
- $class = $wgJobQueueAggregator['class'];
- $obj = new $class( $wgJobQueueAggregator );
- if ( !( $obj instanceof JobQueueAggregator ) ) {
- throw new MWException( "Class '$class' is not a JobQueueAggregator class." );
- }
- self::$instance = $obj;
- }
-
- return self::$instance;
- }
-
- /**
- * Destroy the singleton instance
- *
- * @return void
- */
- final public static function destroySingleton() {
- self::$instance = null;
- }
-
- /**
- * Mark a queue as being empty
- *
- * @param string $wiki
- * @param string $type
- * @return bool Success
- */
- final public function notifyQueueEmpty( $wiki, $type ) {
- $ok = $this->doNotifyQueueEmpty( $wiki, $type );
-
- return $ok;
- }
-
- /**
- * @see JobQueueAggregator::notifyQueueEmpty()
- * @param string $wiki
- * @param string $type
- * @return bool
- */
- abstract protected function doNotifyQueueEmpty( $wiki, $type );
-
- /**
- * Mark a queue as being non-empty
- *
- * @param string $wiki
- * @param string $type
- * @return bool Success
- */
- final public function notifyQueueNonEmpty( $wiki, $type ) {
- $ok = $this->doNotifyQueueNonEmpty( $wiki, $type );
-
- return $ok;
- }
-
- /**
- * @see JobQueueAggregator::notifyQueueNonEmpty()
- * @param string $wiki
- * @param string $type
- * @return bool
- */
- abstract protected function doNotifyQueueNonEmpty( $wiki, $type );
-
- /**
- * Get the list of all of the queues with jobs
- *
- * @return array (job type => (list of wiki IDs))
- */
- final public function getAllReadyWikiQueues() {
- $res = $this->doGetAllReadyWikiQueues();
-
- return $res;
- }
-
- /**
- * @see JobQueueAggregator::getAllReadyWikiQueues()
- */
- abstract protected function doGetAllReadyWikiQueues();
-
- /**
- * Purge all of the aggregator information
- *
- * @return bool Success
- */
- final public function purge() {
- $res = $this->doPurge();
-
- return $res;
- }
-
- /**
- * @see JobQueueAggregator::purge()
- */
- abstract protected function doPurge();
-
- /**
- * Get all databases that have a pending job.
- * This poll all the queues and is this expensive.
- *
- * @return array (job type => (list of wiki IDs))
- */
- protected function findPendingWikiQueues() {
- global $wgLocalDatabases;
-
- $pendingDBs = []; // (job type => (db list))
- foreach ( $wgLocalDatabases as $wikiId ) {
- foreach ( JobQueueGroup::singleton( $wikiId )->getQueuesWithJobs() as $type ) {
- $pendingDBs[$type][] = $wikiId;
- }
- }
-
- return $pendingDBs;
- }
-}
+++ /dev/null
-<?php
-/**
- * Job queue aggregator code.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- * http://www.gnu.org/copyleft/gpl.html
- *
- * @file
- */
-
-/**
- * @ingroup JobQueue
- */
-class JobQueueAggregatorNull extends JobQueueAggregator {
- protected function doNotifyQueueEmpty( $wiki, $type ) {
- return true;
- }
-
- protected function doNotifyQueueNonEmpty( $wiki, $type ) {
- return true;
- }
-
- protected function doGetAllReadyWikiQueues() {
- return [];
- }
-
- protected function doPurge() {
- return true;
- }
-}
+++ /dev/null
-<?php
-/**
- * Job queue aggregator code that uses PhpRedis.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- * http://www.gnu.org/copyleft/gpl.html
- *
- * @file
- */
-use Psr\Log\LoggerInterface;
-
-/**
- * Class to handle tracking information about all queues using PhpRedis
- *
- * The mediawiki/services/jobrunner background service must be set up and running.
- *
- * @ingroup JobQueue
- * @ingroup Redis
- * @since 1.21
- */
-class JobQueueAggregatorRedis extends JobQueueAggregator {
- /** @var RedisConnectionPool */
- protected $redisPool;
- /** @var LoggerInterface */
- protected $logger;
- /** @var array List of Redis server addresses */
- protected $servers;
-
- /**
- * @param array $params Possible keys:
- * - redisConfig : An array of parameters to RedisConnectionPool::__construct().
- * - redisServers : Array of server entries, the first being the primary and the
- * others being fallback servers. Each entry is either a hostname/port
- * combination or the absolute path of a UNIX socket.
- * If a hostname is specified but no port, the standard port number
- * 6379 will be used. Required.
- */
- public function __construct( array $params ) {
- parent::__construct( $params );
- $this->servers = $params['redisServers'] ?? [ $params['redisServer'] ]; // b/c
- $params['redisConfig']['serializer'] = 'none';
- $this->redisPool = RedisConnectionPool::singleton( $params['redisConfig'] );
- $this->logger = \MediaWiki\Logger\LoggerFactory::getInstance( 'redis' );
- }
-
- protected function doNotifyQueueEmpty( $wiki, $type ) {
- return true; // managed by the service
- }
-
- protected function doNotifyQueueNonEmpty( $wiki, $type ) {
- return true; // managed by the service
- }
-
- protected function doGetAllReadyWikiQueues() {
- $conn = $this->getConnection();
- if ( !$conn ) {
- return [];
- }
- try {
- $map = $conn->hGetAll( $this->getReadyQueueKey() );
-
- if ( is_array( $map ) && isset( $map['_epoch'] ) ) {
- unset( $map['_epoch'] ); // ignore
- $pendingDBs = []; // (type => list of wikis)
- foreach ( $map as $key => $time ) {
- list( $type, $wiki ) = $this->decodeQueueName( $key );
- $pendingDBs[$type][] = $wiki;
- }
- } else {
- throw new UnexpectedValueException(
- "No queue listing found; make sure redisJobChronService is running."
- );
- }
-
- return $pendingDBs;
- } catch ( RedisException $e ) {
- $this->redisPool->handleError( $conn, $e );
-
- return [];
- }
- }
-
- protected function doPurge() {
- return true; // fully and only refreshed by the service
- }
-
- /**
- * Get a connection to the server that handles all sub-queues for this queue
- *
- * @return RedisConnRef|bool Returns false on failure
- * @throws MWException
- */
- protected function getConnection() {
- $conn = false;
- foreach ( $this->servers as $server ) {
- $conn = $this->redisPool->getConnection( $server, $this->logger );
- if ( $conn ) {
- break;
- }
- }
-
- return $conn;
- }
-
- /**
- * @return string
- */
- private function getReadyQueueKey() {
- return "jobqueue:aggregator:h-ready-queues:v2"; // global
- }
-
- /**
- * @param string $name
- * @return string[]
- */
- private function decodeQueueName( $name ) {
- list( $type, $wiki ) = explode( '/', $name, 2 );
-
- return [ rawurldecode( $type ), rawurldecode( $wiki ) ];
- }
-}
background-origin: border-box;
background-position: center center;
background-repeat: no-repeat;
- .background-size( 0, 0 );
+ background-size: 0 0;
.box-sizing( border-box );
position: absolute;
// Ensure alignment of checkbox to middle of the text in long labels, see T85241
// Apply a checkmark on the pseudo `:before` element when the input is checked
&:checked + label:before {
.background-image-svg( 'images/checkbox-checked.svg', 'images/checkbox-checked.png' );
- .background-size( 90%, 90% );
+ background-size: 90% 90%;
}
&:enabled {