X-Git-Url: http://git.cyclocoop.org/?a=blobdiff_plain;f=includes%2FMediaWiki.php;h=7a6987e20f5e11286af8fdbce5d704b56819eb73;hb=440d00a4805c9505c2544f8e4bf8817f61230603;hp=ca77121b6bda800f4f41c2030c2f738abfda4c56;hpb=e5ef0fd0c6607dd34f6dee69d716b159662a0a34;p=lhc%2Fweb%2Fwiklou.git

diff --git a/includes/MediaWiki.php b/includes/MediaWiki.php
index ca77121b6b..7a6987e20f 100644
--- a/includes/MediaWiki.php
+++ b/includes/MediaWiki.php
@@ -23,8 +23,8 @@
 use MediaWiki\Logger\LoggerFactory;
 use Psr\Log\LoggerInterface;
 use MediaWiki\MediaWikiServices;
+use Wikimedia\Rdbms\ILBFactory;
 use Wikimedia\Rdbms\ChronologyProtector;
-use Wikimedia\Rdbms\LBFactory;
 use Wikimedia\Rdbms\DBConnectionError;
 use Liuggio\StatsdClient\Sender\SocketSender;
 
@@ -260,8 +260,16 @@ class MediaWiki {
 			) {
 				list( , $subpage ) = $spFactory->resolveAlias( $title->getDBkey() );
 				$target = $specialPage->getRedirect( $subpage );
-				// target can also be true. We let that case fall through to normal processing.
+				// Target can also be true. We let that case fall through to normal processing.
 				if ( $target instanceof Title ) {
+					if ( $target->isExternal() ) {
+						// Handle interwiki redirects
+						$target = SpecialPage::getTitleFor(
+							'GoToInterwiki',
+							'force/' . $target->getPrefixedDBkey()
+						);
+					}
+
 					$query = $specialPage->getRedirectQuery( $subpage ) ?: [];
 					$request = new DerivativeRequest( $this->context->getRequest(), $query );
 					$request->setRequestURL( $this->context->getRequest()->getRequestURL() );
@@ -580,15 +588,15 @@ class MediaWiki {
 	public static function preOutputCommit(
 		IContextSource $context, callable $postCommitWork = null
 	) {
-		// Either all DBs should commit or none
-		ignore_user_abort( true );
-
 		$config = $context->getConfig();
 		$request = $context->getRequest();
 		$output = $context->getOutput();
 		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
 
-		// Commit all changes
+		// Try to make sure that all RDBMs, session, and other storage updates complete
+		ignore_user_abort( true );
+
+		// Commit all RDBMs changes from the main transaction round
 		$lbFactory->commitMasterChanges(
 			__METHOD__,
 			// Abort if any transaction was too big
@@ -596,47 +604,31 @@ class MediaWiki {
 		);
 		wfDebug( __METHOD__ . ': primary transaction round committed' );
 
-		// Run updates that need to block the user or affect output (this is the last chance)
+		// Run updates that need to block the client or affect output (this is the last chance)
 		DeferredUpdates::doUpdates( 'run', DeferredUpdates::PRESEND );
 		wfDebug( __METHOD__ . ': pre-send deferred updates completed' );
 
-		// T214471: persist the session to avoid race conditions on subsequent requests
-		$request->getSession()->save();
-
-		// Should the client return, their request should observe the new ChronologyProtector
-		// DB positions. This request might be on a foreign wiki domain, so synchronously update
-		// the DB positions in all datacenters to be safe. If this output is not a redirect,
-		// then OutputPage::output() will be relatively slow, meaning that running it in
-		// $postCommitWork should help mask the latency of those updates.
-		$flags = $lbFactory::SHUTDOWN_CHRONPROT_SYNC;
-		$strategy = 'cookie+sync';
-
-		$allowHeaders = !( $output->isDisabled() || headers_sent() );
-		if ( $output->getRedirect() && $lbFactory->hasOrMadeRecentMasterChanges( INF ) ) {
-			// OutputPage::output() will be fast, so $postCommitWork is useless for masking
-			// the latency of synchronously updating the DB positions in all datacenters.
-			// Try to make use of the time the client spends following redirects instead.
-			$domainDistance = self::getUrlDomainDistance( $output->getRedirect() );
-			if ( $domainDistance === 'local' && $allowHeaders ) {
-				$flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
-				$strategy = 'cookie'; // use same-domain cookie and keep the URL uncluttered
-			} elseif ( $domainDistance === 'remote' ) {
-				$flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
-				$strategy = 'cookie+url'; // cross-domain cookie might not work
-			}
-		}
-
+		// Persist the session to avoid race conditions on subsequent requests by the client
+		$request->getSession()->save(); // T214471
+		wfDebug( __METHOD__ . ': session changes committed' );
+
+		// Figure out whether to wait for DB replication now or to use some method that assures
+		// that subsequent requests by the client will use the DB replication positions written
+		// during the shutdown() call below; the latter requires working around replication lag
+		// of the store containing DB replication positions (e.g. dynomite, mcrouter).
+		list( $flags, $strategy ) = self::getChronProtStrategy( $lbFactory, $output );
 		// Record ChronologyProtector positions for DBs affected in this request at this point
 		$cpIndex = null;
 		$cpClientId = null;
 		$lbFactory->shutdown( $flags, $postCommitWork, $cpIndex, $cpClientId );
 		wfDebug( __METHOD__ . ': LBFactory shutdown completed' );
 
+		$allowHeaders = !( $output->isDisabled() || headers_sent() );
 		if ( $cpIndex > 0 ) {
 			if ( $allowHeaders ) {
 				$now = time();
 				$expires = $now + ChronologyProtector::POSITION_COOKIE_TTL;
 				$options = [ 'prefix' => '' ];
-				$value = LBFactory::makeCookieValueFromCPIndex( $cpIndex, $now, $cpClientId );
+				$value = $lbFactory::makeCookieValueFromCPIndex( $cpIndex, $now, $cpClientId );
 				$request->response()->setCookie( 'cpPosIndex', $value, $expires, $options );
 			}
@@ -654,31 +646,66 @@ class MediaWiki {
 			}
 		}
 
-		// Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that handles this
-		// POST request (e.g. the "master" data center). Also have the user briefly bypass CDN so
-		// ChronologyProtector works for cacheable URLs.
-		if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) {
-			$expires = time() + $config->get( 'DataCenterUpdateStickTTL' );
-			$options = [ 'prefix' => '' ];
-			$request->response()->setCookie( 'UseDC', 'master', $expires, $options );
-			$request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options );
-		}
+		if ( $allowHeaders ) {
+			// Set a cookie to tell all CDN edge nodes to "stick" the user to the DC that
+			// handles this POST request (e.g. the "master" data center). Also have the user
+			// briefly bypass CDN so ChronologyProtector works for cacheable URLs.
+			if ( $request->wasPosted() && $lbFactory->hasOrMadeRecentMasterChanges() ) {
+				$expires = time() + $config->get( 'DataCenterUpdateStickTTL' );
+				$options = [ 'prefix' => '' ];
+				$request->response()->setCookie( 'UseDC', 'master', $expires, $options );
+				$request->response()->setCookie( 'UseCDNCache', 'false', $expires, $options );
+			}
+
+			// Avoid letting a few seconds of replica DB lag cause a month of stale data.
+			// This logic is also intimately related to the value of $wgCdnReboundPurgeDelay.
+			if ( $lbFactory->laggedReplicaUsed() ) {
+				$maxAge = $config->get( 'CdnMaxageLagged' );
+				$output->lowerCdnMaxage( $maxAge );
+				$request->response()->header( "X-Database-Lagged: true" );
+				wfDebugLog( 'replication',
+					"Lagged DB used; CDN cache TTL limited to $maxAge seconds" );
+			}
 
-		// Avoid letting a few seconds of replica DB lag cause a month of stale data. This logic is
-		// also intimately related to the value of $wgCdnReboundPurgeDelay.
-		if ( $lbFactory->laggedReplicaUsed() ) {
-			$maxAge = $config->get( 'CdnMaxageLagged' );
-			$output->lowerCdnMaxage( $maxAge );
-			$request->response()->header( "X-Database-Lagged: true" );
-			wfDebugLog( 'replication', "Lagged DB used; CDN cache TTL limited to $maxAge seconds" );
+			// Avoid long-term cache pollution due to message cache rebuild timeouts (T133069)
+			if ( MessageCache::singleton()->isDisabled() ) {
+				$maxAge = $config->get( 'CdnMaxageSubstitute' );
+				$output->lowerCdnMaxage( $maxAge );
+				$request->response()->header( "X-Response-Substitute: true" );
+			}
 		}
+	}
+
+	/**
+	 * @param ILBFactory $lbFactory
+	 * @param OutputPage $output
+	 * @return array
+	 */
+	private static function getChronProtStrategy( ILBFactory $lbFactory, OutputPage $output ) {
+		// Should the client return, their request should observe the new ChronologyProtector
+		// DB positions. This request might be on a foreign wiki domain, so synchronously update
+		// the DB positions in all datacenters to be safe. If this output is not a redirect,
+		// then OutputPage::output() will be relatively slow, meaning that running it in
+		// $postCommitWork should help mask the latency of those updates.
+		$flags = $lbFactory::SHUTDOWN_CHRONPROT_SYNC;
+		$strategy = 'cookie+sync';
 
-		// Avoid long-term cache pollution due to message cache rebuild timeouts (T133069)
-		if ( MessageCache::singleton()->isDisabled() ) {
-			$maxAge = $config->get( 'CdnMaxageSubstitute' );
-			$output->lowerCdnMaxage( $maxAge );
-			$request->response()->header( "X-Response-Substitute: true" );
+		$allowHeaders = !( $output->isDisabled() || headers_sent() );
+		if ( $output->getRedirect() && $lbFactory->hasOrMadeRecentMasterChanges( INF ) ) {
+			// OutputPage::output() will be fast, so $postCommitWork is useless for masking
+			// the latency of synchronously updating the DB positions in all datacenters.
+			// Try to make use of the time the client spends following redirects instead.
+			$domainDistance = self::getUrlDomainDistance( $output->getRedirect() );
+			if ( $domainDistance === 'local' && $allowHeaders ) {
+				$flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
+				$strategy = 'cookie'; // use same-domain cookie and keep the URL uncluttered
+			} elseif ( $domainDistance === 'remote' ) {
+				$flags = $lbFactory::SHUTDOWN_CHRONPROT_ASYNC;
+				$strategy = 'cookie+url'; // cross-domain cookie might not work
+			}
 		}
+
+		return [ $flags, $strategy ];
 	}
 
 	/**
@@ -917,7 +944,7 @@ class MediaWiki {
 
 		// Commit and close up!
 		$lbFactory->commitMasterChanges( __METHOD__ );
-		$lbFactory->shutdown( LBFactory::SHUTDOWN_NO_CHRONPROT );
+		$lbFactory->shutdown( $lbFactory::SHUTDOWN_NO_CHRONPROT );
 
 		wfDebug( "Request ended normally\n" );
 	}
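As context for the $postCommitWork parameter used throughout preOutputCommit(): the entry point wraps the actual output step in a closure and passes it in, so that the LBFactory shutdown step can run the output work while it waits on the synchronous ChronologyProtector position writes. A minimal, illustrative sketch of such a caller follows; it is not part of this patch and assumes an already bootstrapped MediaWiki request context.

	// Illustrative only; assumes MediaWiki bootstrap has already run.
	$context = RequestContext::getMain();
	$output = $context->getOutput();

	// Wrapping OutputPage::output() in a closure lets the shutdown step run it
	// while waiting on synchronous ChronologyProtector position writes, so the
	// output work masks that latency instead of adding to it.
	$outputWork = function () use ( $output ) {
		$output->output();
	};

	// Commits the main transaction round, runs PRESEND deferred updates, saves
	// the session, and records ChronologyProtector positions, as shown above.
	MediaWiki::preOutputCommit( $context, $outputWork );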