function parseScalar( $str ) {
if ( $str !== '' && $str[0] == '\'' )
// Single-quoted string
- // @fixme trim() call is due to mystery bug where whitespace gets
+ // @todo Fixme: trim() call is due to mystery bug where whitespace gets
// appended to the token; without it we ended up reading in the
// extra quote on the end!
return strtr( substr( trim( $str ), 1, -1 ),
array( '\\\'' => '\'', '\\\\' => '\\' ) );
- if ( $str !== '' && @$str[0] == '"' )
+ if ( $str !== '' && $str[0] == '"' )
// Double-quoted string
- // @fixme trim() call is due to mystery bug where whitespace gets
+ // @todo Fixme: trim() call is due to mystery bug where whitespace gets
// appended to the token; without it we ended up reading in the
// extra quote on the end!
return stripcslashes( substr( trim( $str ), 1, -1 ) );
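// Illustrative only (my examples, not part of the patch): what the two quoted
// branches above should yield once the mystery whitespace is trimmed away:
//   parseScalar( "'it\\'s'" )  => it's     (strtr() unescapes \' and \\)
//   parseScalar( '"a\tb"' )    => a<tab>b  (stripcslashes() expands \t)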
$resultDetails = array( 'internal' => $status->getWikiText() );
*/
- // @fixme upload() uses $wgUser, which is wrong here
+ // @todo Fixme: upload() uses $wgUser, which is wrong here
- // it may also create a page without our desire, also wrong potentially.
- // and, it will record a *current* upload, but we might want an archive version here
+ // it may also create a page we do not want, which is potentially wrong as well,
+ // and it will record a *current* upload when we might want an archive version here
return false;
}
- // @fixme!
+ // @todo Fixme!
$src = $this->getSrc();
$data = Http::get( $src );
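// Http::get() returns the response body or false on failure; note that the
// falsy check below also treats an empty (zero-length) body as a failure.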
if( !$data ) {
* @param String $linktype Type of external link. Gets added to the classes
* @param array $attribs Array of extra attributes to <a>
*
- * @TODO! @FIXME! This is a really crappy implementation. $linktype and
+ * @todo Fixme: this is a really crappy implementation. $linktype and
* 'external' are mashed into the class attrib for the link (which is made
* into a string). Then, if we've got additional params in $attribs, we
* add to it. People using this might want to change the classes (or other
* Formats wiki links and media links in text; all other wiki formatting
* is ignored
*
- * @fixme doesn't handle sub-links as in image thumb texts like the main parser
+ * @todo Fixme: doesn't handle sub-links as in image thumb texts like the main parser
* @param string $comment Text to format links in
* @return string
*/
* @private
*/
function wfRequestExtension() {
- /// @fixme -- this sort of dupes some code in WebRequest::getRequestUrl()
+ /// @todo Fixme: this sort of dupes some code in WebRequest::getRequestUrl()
if( isset( $_SERVER['REQUEST_URI'] ) ) {
// Strip the query string...
list( $path ) = explode( '?', $_SERVER['REQUEST_URI'], 2 );
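// e.g. "/index.php?title=Main_Page" yields $path = "/index.php"; the explode()
// limit of 2 keeps any further '?' characters confined to the discarded part.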
/**
* Make a fake revision object from an archive table row. This is queried
* for permissions or even inserted (as in Special:Undelete)
- * @fixme: should be a subclass for RevisionDelete. [TS]
+ * @todo Fixme: should be a subclass for RevisionDelete. [TS]
*/
public static function newFromArchiveRow( $row, $overrides = array() ) {
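// PHP's + on arrays is a union in which left-hand keys win, so any key
// present in $overrides takes precedence over the defaults listed below.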
$attribs = $overrides + array(
$host = preg_replace( $strip, '', $host );
- // @fixme: validate hostnames here
+ // @todo Fixme: validate hostnames here
return $protocol . $host . $rest;
} else {
private $_response;
public function __construct() {
- /// @fixme This preemptive de-quoting can interfere with other web libraries
+ /// @todo Fixme: this preemptive de-quoting can interfere with other web libraries
/// and increases our memory footprint. It would be cleaner to do on
/// demand; but currently we have no wrapper for $_SERVER etc.
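/// (With magic_quotes_gpc enabled, PHP addslashes() every incoming $_GET,
/// $_POST and $_COOKIE value; the call below strips those slashes again.)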
$this->checkMagicQuotes();
return $this->mInfo['mime'];
}
- /// @fixme May guess wrong on file types that can be eg audio or video
+ /// @todo Fixme: may guess wrong on file types that can be eg audio or video
function getMediaType() {
$magic = MimeMagic::singleton();
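// Illustrative mapping (an assumption, not verified here): a MIME type such
// as "image/jpeg" should come back from MimeMagic as MEDIATYPE_BITMAP.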
return $magic->getMediaType( null, $this->getMimeType() );
}
/**
- * @fixme document this!
+ * @todo Fixme: document this!
* 'value' thingy goes into a wikitext table; it used to be escaped but
* that was incompatible with previous practice of customized display
* with wikitext formatting via messages such as 'exif-model-value'.
*/
function uniqPrefix() {
if( !isset( $this->mUniqPrefix ) ) {
- // @fixme this is probably *horribly wrong*
+ // @todo Fixme: this is probably *horribly wrong*
// LanguageConverter seems to want $wgParser's uniqPrefix, however
// if this is called for a parser cache hit, the parser may not
// have ever been initialized in the first place.
switch( $paramName ) {
case 'manualthumb':
case 'alt':
- // @fixme - possibly check validity here for
+ // @todo Fixme: possibly check validity here for
// manualthumb? downstream behavior seems odd with
// missing manual thumbs.
$validated = true;
- * @param Title $nt @see Title object for the target
+ * @param Title $nt Title object for the target
* @param integer $id User ID for the target
* @return String: appropriately-escaped HTML to be output literally
- * @fixme Almost the same as getSubTitle in SpecialDeletedContributions.php. Could be combined.
+ * @todo Fixme: almost the same as getSubTitle in SpecialDeletedContributions.php. Could be combined.
*/
protected function contributionsSub( $nt, $id ) {
global $wgSysopUserBans, $wgLang, $wgUser, $wgOut;
- * @param Title $nt @see Title object for the target
+ * @param Title $nt Title object for the target
* @param integer $id User ID for the target
* @return String: appropriately-escaped HTML to be output literally
- * @fixme Almost the same as contributionsSub in SpecialContributions.php. Could be combined.
+ * @todo Fixme: almost the same as contributionsSub in SpecialContributions.php. Could be combined.
*/
function getSubTitle( $nt, $id ) {
global $wgSysopUserBans, $wgLang, $wgUser, $wgOut;
$t = Title::makeTitleSafe( NS_MAIN, $catname );
if ( $t ) {
/**
- * @fixme This can lead to hitting memory limit for very large
+ * @todo Fixme: this can lead to hitting memory limit for very large
* categories. Ideally we would do the lookup synchronously
* during the export in a single query.
*/
if ( strval( $nsindex ) !== '' ) {
/**
- * Same implementation as above, so same @fixme
+ * Same implementation as above, so the same @todo applies
*/
$nspages = $this->getPagesFromNamespace( $nsindex );
if ( $nspages ) $page .= "\n" . implode( "\n", $nspages );
$title = Title::newFromText( $page );
if( $title ) {
$pageSet[$title->getPrefixedText()] = true;
- /// @fixme May or may not be more efficient to batch these
+ /// @todo Fixme: may or may not be more efficient to batch these
/// by namespace when given multiple input pages.
$result = $dbr->select(
array( 'page', $table ),
$sk = $wgUser->getSkin();
$isDeleted = !( $rev->getId() && $rev->getTitle() );
if( $isDeleted ) {
- /// @fixme $rev->getTitle() is null for deleted revs...?
+ /// @todo Fixme: $rev->getTitle() is null for deleted revs...?
$targetPage = SpecialPage::getTitleFor( 'Undelete' );
$targetQuery = array(
'target' => $this->mTargetObj->getPrefixedText(),
'timestamp' => wfTimestamp( TS_MW, $rev->getTimestamp() )
);
} else {
- /// @fixme getId() may return non-zero for deleted revs...
+ /// @todo Fixme: getId() may return non-zero for deleted revs...
$targetPage = $rev->getTitle();
$targetQuery = array( 'oldid' => $rev->getId() );
}
// eventually this should be a word segmentation
// for now just treat each character as a word
- // @fixme only do this for Han characters...
+ // @todo Fixme: only do this for Han characters...
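// (\xc0-\xff is a UTF-8 lead byte and \x80-\xbf are continuation bytes, so
// this prepends a space to *every* multibyte character, not just Han ones.)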
$t = preg_replace(
"/([\\xc0-\\xff][\\x80-\\xbf]*)/",
" $1", $string);
// eventually this should be a word segmentation
// for now just treat each character as a word
- // @fixme only do this for Han characters...
+ // @todo Fixme: only do this for Han characters...
$t = preg_replace(
"/([\\xc0-\\xff][\\x80-\\xbf]*)/",
" $1", $string);
// Note we put a space on both sides to cover cases
// where a number or Latin char follows a Han char.
//
- // @fixme only do this for Han characters...
+ // @todo Fixme: only do this for Han characters...
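// Example (mine, for illustration): with a space on both sides, "中文abc"
// becomes " 中  文 abc", so each Han character is indexed as its own word
// even when Latin text or digits follow it.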
$t = preg_replace(
"/([\\xc0-\\xff][\\x80-\\xbf]*)/",
" $1 ", $string);