Try to ensure jobs do not get pushed twice on failures
Author:     Aaron Schulz <aschulz@wikimedia.org>
AuthorDate: Thu, 10 Oct 2013 19:16:34 +0000 (12:16 -0700)
Committer:  Tim Starling <tstarling@wikimedia.org>
CommitDate: Thu, 17 Oct 2013 23:43:10 +0000 (23:43 +0000)
* Some queues, like the DB one, might split batches into chunks. We do
  not want that when they are used as partitions of a federated queue,
  which already manages chunking itself. If only X of Y jobs in a batch
  were inserted before a failure, the whole batch would be pushed again
  to another partition, duplicating the X jobs that did get inserted
  (see the sketch below).

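A minimal sketch of the failure mode, using hypothetical class and
variable names rather than the actual MediaWiki code:

    <?php
    // Hypothetical sketch (not the real MediaWiki code): a partition
    // queue that inserts a batch in chunks. When a later chunk fails,
    // earlier chunks are already stored, but the caller only sees a
    // boolean failure for the whole batch.
    class ChunkingQueue {
    	/** @var string[] "inserted" jobs, kept visible to show the leak */
    	public $stored = array();

    	public function doBatchPush( array $jobs, $flags ) {
    		foreach ( array_chunk( $jobs, 50 ) as $i => $chunk ) {
    			if ( $i === 1 ) {
    				return false; // simulate the second chunk's insert failing
    			}
    			$this->stored = array_merge( $this->stored, $chunk );
    		}
    		return true;
    	}
    }

    $queue = new ChunkingQueue();
    $ok = $queue->doBatchPush( array_fill( 0, 100, 'job' ), 0 );
    // $ok is false, yet 50 jobs were stored. A federated queue, seeing
    // only the failure, would retry the whole 100-job batch on another
    // partition and duplicate those 50 jobs.
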
Change-Id: Id73b34f3500f8aeaf456e6bcac862a1e4832c870

includes/job/JobQueueFederated.php

index d788c98..4257da4 100644
@@ -253,7 +253,7 @@ class JobQueueFederated extends JobQueue {
                foreach ( $uJobsByPartition as $partition => $jobBatch ) {
                        $queue = $this->partitionQueues[$partition];
                        try {
-                               $ok = $queue->doBatchPush( $jobBatch, $flags );
+                               $ok = $queue->doBatchPush( $jobBatch, $flags | self::QOS_ATOMIC );
                        } catch ( JobQueueError $e ) {
                                $ok = false;
                                wfDebugLog( 'exception', $e->getLogMessage() );
@@ -274,7 +274,7 @@ class JobQueueFederated extends JobQueue {
                        } else {
                                $queue = $this->partitionQueues[$partition];
                                try {
-                                       $ok = $queue->doBatchPush( $jobBatch, $flags );
+                                       $ok = $queue->doBatchPush( $jobBatch, $flags | self::QOS_ATOMIC );
                                } catch ( JobQueueError $e ) {
                                        $ok = false;
                                        wfDebugLog( 'exception', $e->getLogMessage() );
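
With QOS_ATOMIC forced on, each partition push becomes all-or-nothing,
so a failed push leaves nothing behind and the retry on another
partition is safe. A sketch of what that buys, extending the
hypothetical ChunkingQueue above (a real DB-backed queue would use a
database transaction rather than an in-memory snapshot):

    <?php
    // Hypothetical continuation of the ChunkingQueue sketch: honoring
    // QOS_ATOMIC by rolling back partial work on failure.
    class AtomicAwareQueue extends ChunkingQueue {
    	const QOS_ATOMIC = 1;

    	public function doBatchPush( array $jobs, $flags ) {
    		if ( !( $flags & self::QOS_ATOMIC ) ) {
    			return parent::doBatchPush( $jobs, $flags );
    		}
    		$snapshot = $this->stored; // stand-in for BEGIN
    		if ( !parent::doBatchPush( $jobs, $flags ) ) {
    			$this->stored = $snapshot; // stand-in for ROLLBACK
    			return false; // nothing persisted; retrying elsewhere is safe
    		}
    		return true; // stand-in for COMMIT
    	}
    }

    $queue = new AtomicAwareQueue();
    $ok = $queue->doBatchPush(
    	array_fill( 0, 100, 'job' ), AtomicAwareQueue::QOS_ATOMIC );
    // $ok is false and $queue->stored stays empty, so the federated
    // queue can push the full batch to another partition without
    // creating duplicates.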