// may still be a wikipage redirect to another article or URL.
$article = $this->initializeArticle();
if ( is_object( $article ) ) {
- $url = $request->getFullRequestURL(); // requested URL
- if (
- $request->getMethod() === 'GET' &&
- $url === $article->getTitle()->getCanonicalURL() &&
- $article->checkTouched() &&
- $output->checkLastModified( $article->getTouched() )
- ) {
- wfDebug( __METHOD__ . ": done 304\n" );
- return;
- }
$this->performAction( $article, $requestTitle );
} elseif ( is_string( $article ) ) {
$output->redirect( $article );
* - Normalise empty title:
* /wiki/ -> /wiki/Main_Page
* /w/index.php?title= -> /wiki/Main_Page
- * - Normalise non-standard title urls:
- * /w/index.php?title=Foo_Bar -> /wiki/Foo_Bar
* - Don't redirect anything with query parameters other than 'title' or 'action=view'.
*
* @param Title $title
if ( $request->getVal( 'action', 'view' ) != 'view'
|| $request->wasPosted()
+ || ( $request->getVal( 'title' ) !== null
+ && $title->getPrefixedDBkey() == $request->getVal( 'title' ) )
|| count( $request->getValueNames( [ 'action', 'title' ] ) )
|| !Hooks::run( 'TestCanonicalRedirect', [ $request, $title, $output ] )
) {
return false;
}
// Redirect to canonical url, make it a 301 to allow caching
$targetUrl = wfExpandUrl( $title->getFullURL(), PROTO_CURRENT );
-
- if ( $targetUrl != $request->getFullRequestURL() ) {
- $output->setCdnMaxage( 1200 );
- $output->redirect( $targetUrl, '301' );
- return true;
- }
-
- // If there is no title, or the title is in a non-standard encoding, we demand
- // a redirect. If cgi somehow changed the 'title' query to be non-standard while
- // the url is standard, the server is misconfigured.
- if ( $request->getVal( 'title' ) === null
- || $title->getPrefixedDBkey() != $request->getVal( 'title' )
- ) {
+ if ( $targetUrl == $request->getFullRequestURL() ) {
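+ // A 301 pointing back at the very URL that was requested would redirect forever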
$message = "Redirect loop detected!\n\n" .
"This means the wiki got confused about what page was " .
"requested; this sometimes happens when moving a wiki " .
}
throw new HttpError( 500, $message );
}
- return false;
+ $output->setCdnMaxage( 1200 );
+ $output->redirect( $targetUrl, '301' );
+ return true;
}
/**
$config = $context->getConfig();
- $factory = wfGetLBFactory();
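+ // Resolve the load balancer factory from the services container instead of the global wrapper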
+ $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
// Commit all changes
- $factory->commitMasterChanges(
+ $lbFactory->commitMasterChanges(
__METHOD__,
// Abort if any transaction was too big
[ 'maxWriteDuration' => $config->get( 'MaxUserDBWriteDuration' ) ]
);
- // Record ChronologyProtector positions
- $factory->shutdown();
- wfDebug( __METHOD__ . ': all transactions committed' );
DeferredUpdates::doUpdates( 'enqueue', DeferredUpdates::PRESEND );
wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
+ // Record ChronologyProtector positions
+ $lbFactory->shutdown();
+ wfDebug( __METHOD__ . ': all transactions committed' );
+
// Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this
// POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so
// ChronologyProtector works for cacheable URLs.
$request = $context->getRequest();
- if ( $request->wasPosted() && $factory->hasOrMadeRecentMasterChanges() ) {
+ if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) {
$expires = time() + $config->get( 'DataCenterUpdateStickTTL' );
$options = [ 'prefix' => '' ];
$request->response()->setCookie( 'UseDC', 'master', $expires, $options );
$request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options );
}
- // Avoid letting a few seconds of slave lag cause a month of stale data. This logic is
+ // Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is
// also intimately related to the value of $wgCdnReboundPurgeDelay.
- if ( $factory->laggedSlaveUsed() ) {
+ if ( $lbFactory->laggedReplicaUsed() ) {
$maxAge = $config->get( 'CdnMaxageLagged' );
$context->getOutput()->lowerCdnMaxage( $maxAge );
$request->response()->header( "X-Database-Lagged: true" );
fastcgi_finish_request();
} else {
// Either all DB and deferred updates should happen or none.
- // The later should not be cancelled due to client disconnect.
+ // The latter should not be cancelled due to client disconnect.
ignore_user_abort( true );
}
* @param string $mode Use 'fast' to always skip job running
*/
public function restInPeace( $mode = 'fast' ) {
- $factory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
+ $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
// Assure deferred updates are not in the main transaction
- $factory->commitMasterChanges( __METHOD__ );
+ $lbFactory->commitMasterChanges( __METHOD__ );
// Loosen DB query expectations since the HTTP client is unblocked
$trxProfiler = Profiler::instance()->getTransactionProfiler();
wfLogProfilingData();
// Commit and close up!
- $factory->commitMasterChanges( __METHOD__ );
- $factory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT );
+ $lbFactory->commitMasterChanges( __METHOD__ );
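+ // ChronologyProtector positions were already recorded by the earlier shutdown() call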
+ $lbFactory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT );
wfDebug( "Request ended normally\n" );
}
$runJobsLogger = LoggerFactory::getInstance( 'runJobs' );
+ // Fall back to running the job(s) while the user waits if needed
if ( !$this->config->get( 'RunJobsAsync' ) ) {
- // Fall back to running the job here while the user waits
$runner = new JobRunner( $runJobsLogger );
$runner->run( [ 'maxJobs' => $n ] );
return;
}
+ // Do not send request if there are probably no jobs
try {
- if ( !JobQueueGroup::singleton()->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
- return; // do not send request if there are probably no jobs
+ $group = JobQueueGroup::singleton();
+ if ( !$group->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) {
+ return;
}
} catch ( JobQueueError $e ) {
MWExceptionHandler::logException( $e );
$errno = $errstr = null;
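// By-reference failure outputs for the fsockopen() call below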
$info = wfParseUrl( $this->config->get( 'CanonicalServer' ) );
- MediaWiki\suppressWarnings();
- $host = $info['host'];
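+ // wfParseUrl() returns false for unparseable URLs; a null host skips the connect below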
+ $host = $info ? $info['host'] : null;
$port = 80;
if ( isset( $info['scheme'] ) && $info['scheme'] == 'https' ) {
$host = "tls://" . $host;
if ( isset( $info['port'] ) ) {
$port = $info['port'];
}
- $sock = fsockopen(
+
+ MediaWiki\suppressWarnings();
+ $sock = $host ? fsockopen(
$host,
$port,
$errno,
$errstr,
- // If it takes more than 100ms to connect to ourselves there
- // is a problem elsewhere.
- 0.1
- );
+ // If it takes more than 100ms to connect to ourselves there is a problem...
+ 0.100
+ ) : false;
MediaWiki\restoreWarnings();
- if ( !$sock ) {
+
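+ // Assume the async hand-off succeeds; any failure below flips this and triggers the blocking fallback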
+ $invokedWithSuccess = true;
+ if ( $sock ) {
+ $special = SpecialPageFactory::getPage( 'RunJobs' );
+ $url = $special->getPageTitle()->getCanonicalURL( $query );
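+ // Hand-roll a minimal HTTP/1.1 POST; the job parameters travel in the URL query string, so no body is needed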
+ $req = (
+ "POST $url HTTP/1.1\r\n" .
+ "Host: {$info['host']}\r\n" .
+ "Connection: Close\r\n" .
+ "Content-Length: 0\r\n\r\n"
+ );
+
+ $runJobsLogger->info( "Running $n job(s) via '$url'" );
+ // Send a cron API request to be performed in the background.
+ // Give up if this takes too long to send (which should be rare).
+ stream_set_timeout( $sock, 2 );
+ $bytes = fwrite( $sock, $req );
+ if ( $bytes !== strlen( $req ) ) {
+ $invokedWithSuccess = false;
+ $runJobsLogger->error( "Failed to start cron API (socket write error)" );
+ } else {
+ // Do not wait for the response (the script should handle client aborts).
+ // Make sure that we don't close before that script reaches ignore_user_abort().
+ $start = microtime( true );
+ $status = fgets( $sock );
+ $sec = microtime( true ) - $start;
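+ // Elapsed wait for the status line, logged below if the endpoint did not answer 202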
+ if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
+ $invokedWithSuccess = false;
+ $runJobsLogger->error( "Failed to start cron API: received '$status' ($sec)" );
+ }
+ }
+ fclose( $sock );
+ } else {
+ $invokedWithSuccess = false;
$runJobsLogger->error( "Failed to start cron API (socket error $errno): $errstr" );
- // Fall back to running the job here while the user waits
- $runner = new JobRunner( $runJobsLogger );
- $runner->run( [ 'maxJobs' => $n ] );
- return;
}
- $special = SpecialPageFactory::getPage( 'RunJobs' );
- $url = $special->getPageTitle()->getCanonicalURL( $query );
- $req = (
- "POST $url HTTP/1.1\r\n" .
- "Host: {$info['host']}\r\n" .
- "Connection: Close\r\n" .
- "Content-Length: 0\r\n\r\n"
- );
+ // Fall back to running the job(s) while the user waits if needed
+ if ( !$invokedWithSuccess ) {
+ $runJobsLogger->warning( "Jobs switched to blocking; Special:RunJobs disabled" );
- $runJobsLogger->info( "Running $n job(s) via '$url'" );
- // Send a cron API request to be performed in the background.
- // Give up if this takes too long to send (which should be rare).
- stream_set_timeout( $sock, 2 );
- $bytes = fwrite( $sock, $req );
- if ( $bytes !== strlen( $req ) ) {
- $runJobsLogger->error( "Failed to start cron API (socket write error)" );
- } else {
- // Do not wait for the response (the script should handle client aborts).
- // Make sure that we don't close before that script reaches ignore_user_abort().
- $status = fgets( $sock );
- if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) {
- $runJobsLogger->error( "Failed to start cron API: received '$status'" );
- }
+ $runner = new JobRunner( $runJobsLogger );
+ $runner->run( [ 'maxJobs' => $n ] );
}
- fclose( $sock );
}
}