X-Git-Url: https://git.cyclocoop.org/%27.WWW_URL.%27admin/?a=blobdiff_plain;f=includes%2FMediaWiki.php;h=bca7a2175576a2f7b83244423a47bcf725bdeac4;hb=2f04cdd54880c16d1961fd2e1e815465bab0e2ee;hp=77ac76ae4c6ab682b9992fc562c7abd05b8869f1;hpb=e227752be65a8fcef127ee0b2e9cc48dddd382cb;p=lhc%2Fweb%2Fwiklou.git diff --git a/includes/MediaWiki.php b/includes/MediaWiki.php index 77ac76ae4c..bca7a21755 100644 --- a/includes/MediaWiki.php +++ b/includes/MediaWiki.php @@ -554,34 +554,35 @@ class MediaWiki { $config = $context->getConfig(); - $factory = wfGetLBFactory(); + $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory(); // Commit all changes - $factory->commitMasterChanges( + $lbFactory->commitMasterChanges( __METHOD__, // Abort if any transaction was too big [ 'maxWriteDuration' => $config->get( 'MaxUserDBWriteDuration' ) ] ); - // Record ChronologyProtector positions - $factory->shutdown(); - wfDebug( __METHOD__ . ': all transactions committed' ); DeferredUpdates::doUpdates( 'enqueue', DeferredUpdates::PRESEND ); wfDebug( __METHOD__ . ': pre-send deferred updates completed' ); + // Record ChronologyProtector positions + $lbFactory->shutdown(); + wfDebug( __METHOD__ . ': all transactions committed' ); + // Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this // POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so // ChronologyProtector works for cacheable URLs. $request = $context->getRequest(); - if ( $request->wasPosted() && $factory->hasOrMadeRecentMasterChanges() ) { + if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) { $expires = time() + $config->get( 'DataCenterUpdateStickTTL' ); $options = [ 'prefix' => '' ]; $request->response()->setCookie( 'UseDC', 'master', $expires, $options ); $request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options ); } - // Avoid letting a few seconds of slave lag cause a month of stale data. This logic is + // Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is // also intimately related to the value of $wgCdnReboundPurgeDelay. - if ( $factory->laggedSlaveUsed() ) { + if ( $lbFactory->laggedReplicaUsed() ) { $maxAge = $config->get( 'CdnMaxageLagged' ); $context->getOutput()->lowerCdnMaxage( $maxAge ); $request->response()->header( "X-Database-Lagged: true" ); @@ -631,7 +632,7 @@ class MediaWiki { fastcgi_finish_request(); } else { // Either all DB and deferred updates should happen or none. - // The later should not be cancelled due to client disconnect. + // The latter should not be cancelled due to client disconnect. ignore_user_abort( true ); } @@ -762,9 +763,9 @@ class MediaWiki { * @param string $mode Use 'fast' to always skip job running */ public function restInPeace( $mode = 'fast' ) { - $factory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory(); + $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory(); // Assure deferred updates are not in the main transaction - $factory->commitMasterChanges( __METHOD__ ); + $lbFactory->commitMasterChanges( __METHOD__ ); // Loosen DB query expectations since the HTTP client is unblocked $trxProfiler = Profiler::instance()->getTransactionProfiler(); @@ -790,8 +791,8 @@ class MediaWiki { wfLogProfilingData(); // Commit and close up! 
- $factory->commitMasterChanges( __METHOD__ ); - $factory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT ); + $lbFactory->commitMasterChanges( __METHOD__ ); + $lbFactory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT ); wfDebug( "Request ended normally\n" ); } @@ -821,16 +822,18 @@ class MediaWiki { $runJobsLogger = LoggerFactory::getInstance( 'runJobs' ); + // Fall back to running the job(s) while the user waits if needed if ( !$this->config->get( 'RunJobsAsync' ) ) { - // Fall back to running the job here while the user waits $runner = new JobRunner( $runJobsLogger ); - $runner->run( [ 'maxJobs' => $n ] ); + $runner->run( [ 'maxJobs' => $n ] ); return; } + // Do not send request if there are probably no jobs try { - if ( !JobQueueGroup::singleton()->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) { - return; // do not send request if there are probably no jobs + $group = JobQueueGroup::singleton(); + if ( !$group->queuesHaveJobs( JobQueueGroup::TYPE_DEFAULT ) ) { + return; } } catch ( JobQueueError $e ) { MWExceptionHandler::logException( $e ); @@ -844,8 +847,7 @@ class MediaWiki { $errno = $errstr = null; $info = wfParseUrl( $this->config->get( 'CanonicalServer' ) ); - MediaWiki\suppressWarnings(); - $host = $info['host']; + $host = $info ? $info['host'] : null; $port = 80; if ( isset( $info['scheme'] ) && $info['scheme'] == 'https' ) { $host = "tls://" . $host; @@ -854,48 +856,60 @@ class MediaWiki { if ( isset( $info['port'] ) ) { $port = $info['port']; } - $sock = fsockopen( + + MediaWiki\suppressWarnings(); + $sock = $host ? fsockopen( $host, $port, $errno, $errstr, - // If it takes more than 100ms to connect to ourselves there - // is a problem elsewhere. - 0.1 - ); + // If it takes more than 100ms to connect to ourselves there is a problem... + 0.100 + ) : false; MediaWiki\restoreWarnings(); - if ( !$sock ) { + + $invokedWithSuccess = true; + if ( $sock ) { + $special = SpecialPageFactory::getPage( 'RunJobs' ); + $url = $special->getPageTitle()->getCanonicalURL( $query ); + $req = ( + "POST $url HTTP/1.1\r\n" . + "Host: {$info['host']}\r\n" . + "Connection: Close\r\n" . + "Content-Length: 0\r\n\r\n" + ); + + $runJobsLogger->info( "Running $n job(s) via '$url'" ); + // Send a cron API request to be performed in the background. + // Give up if this takes too long to send (which should be rare). + stream_set_timeout( $sock, 2 ); + $bytes = fwrite( $sock, $req ); + if ( $bytes !== strlen( $req ) ) { + $invokedWithSuccess = false; + $runJobsLogger->error( "Failed to start cron API (socket write error)" ); + } else { + // Do not wait for the response (the script should handle client aborts). + // Make sure that we don't close before that script reaches ignore_user_abort(). + $start = microtime( true ); + $status = fgets( $sock ); + $sec = microtime( true ) - $start; + if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) { + $invokedWithSuccess = false; + $runJobsLogger->error( "Failed to start cron API: received '$status' ($sec)" ); + } + } + fclose( $sock ); + } else { + $invokedWithSuccess = false; $runJobsLogger->error( "Failed to start cron API (socket error $errno): $errstr" ); - // Fall back to running the job here while the user waits - $runner = new JobRunner( $runJobsLogger ); - $runner->run( [ 'maxJobs' => $n ] ); - return; } - $special = SpecialPageFactory::getPage( 'RunJobs' ); - $url = $special->getPageTitle()->getCanonicalURL( $query ); - $req = ( - "POST $url HTTP/1.1\r\n" . - "Host: {$info['host']}\r\n" . - "Connection: Close\r\n" . 
- "Content-Length: 0\r\n\r\n" - ); + // Fall back to running the job(s) while the user waits if needed + if ( !$invokedWithSuccess ) { + $runJobsLogger->warning( "Jobs switched to blocking; Special:RunJobs disabled" ); - $runJobsLogger->info( "Running $n job(s) via '$url'" ); - // Send a cron API request to be performed in the background. - // Give up if this takes too long to send (which should be rare). - stream_set_timeout( $sock, 2 ); - $bytes = fwrite( $sock, $req ); - if ( $bytes !== strlen( $req ) ) { - $runJobsLogger->error( "Failed to start cron API (socket write error)" ); - } else { - // Do not wait for the response (the script should handle client aborts). - // Make sure that we don't close before that script reaches ignore_user_abort(). - $status = fgets( $sock ); - if ( !preg_match( '#^HTTP/\d\.\d 202 #', $status ) ) { - $runJobsLogger->error( "Failed to start cron API: received '$status'" ); - } + $runner = new JobRunner( $runJobsLogger ); + $runner->run( [ 'maxJobs' => $n ] ); } - fclose( $sock ); } }