Merge "Separated Config parameters into new json files for FUNCnetCfg test"
diff --git a/TestON/drivers/common/cli/onosclidriver.py b/TestON/drivers/common/cli/onosclidriver.py
index 93152ea..18f0914 100755
--- a/TestON/drivers/common/cli/onosclidriver.py
+++ b/TestON/drivers/common/cli/onosclidriver.py
@@ -163,7 +163,7 @@
                         except pexpect.TIMEOUT:
                             main.log.error( "ONOS did not respond to 'logout' or CTRL-d" )
                         return main.TRUE
-                    else: # some other output
+                    else:  # some other output
                         main.log.warn( "Unknown repsonse to logout command: '{}'",
                                        repr( self.handle.before ) )
                         return main.FALSE
@@ -488,7 +488,7 @@
             if debug:
                 # NOTE: This adds and average of .4 seconds per call
                 logStr = "\"Sending CLI command: '" + cmdStr + "'\""
-                self.log( logStr,noExit=noExit )
+                self.log( logStr, noExit=noExit )
             self.handle.sendline( cmdStr )
             i = self.handle.expect( ["onos>", "\$"], timeout )
             response = self.handle.before
@@ -853,8 +853,8 @@
                     main.log.error( "Mastership not balanced" )
                     main.log.info( "\n" + self.checkMasters( False ) )
                     return main.FALSE
-            main.log.info( "Mastership balanced between " \
-                            + str( len(masters) ) + " masters" )
+            main.log.info( "Mastership balanced between " +
+                           str( len(masters) ) + " masters" )
             return main.TRUE
         except ( TypeError, ValueError ):
             main.log.exception( "{}: Object not as expected: {!r}".format( self.name, mastersOutput ) )
@@ -2254,7 +2254,7 @@
                                str( len( intentDictONOS ) ) + " actual" )
                 returnValue = main.FALSE
             for intentID in intentDict.keys():
-                if not intentID in intentDictONOS.keys():
+                if intentID not in intentDictONOS.keys():
                     main.log.debug( self.name + ": intent ID - " + intentID + " is not in ONOS" )
                     returnValue = main.FALSE
                 else:
@@ -2304,7 +2304,7 @@
 
             # Check response if something wrong
             response = self.sendline( cmd, timeout=timeout, noExit=noExit )
-            if response == None:
+            if response is None:
                 return main.FALSE
             response = json.loads( response )
 
@@ -2327,6 +2327,9 @@
             else:
                 main.cleanup()
                 main.exit()
+        except pexpect.TIMEOUT:
+            main.log.error( self.name + ": ONOS timeout" )
+            return None
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             if noExit:
@@ -2334,9 +2337,6 @@
             else:
                 main.cleanup()
                 main.exit()
-        except pexpect.TIMEOUT:
-            main.log.error( self.name + ": ONOS timeout" )
-            return None
 
     def flows( self, state="", jsonFormat=True, timeout=60, noExit=False, noCore=False ):
         """
@@ -2381,10 +2381,10 @@
 
     def checkFlowCount(self, min=0, timeout=60 ):
         count = self.getTotalFlowsNum( timeout=timeout )
-        count = int (count) if count else 0
-        return count if (count > min) else False
+        count = int( count ) if count else 0
+        return count if ( count > min ) else False
 
-    def checkFlowsState( self, isPENDING=True, timeout=60,noExit=False ):
+    def checkFlowsState( self, isPENDING=True, timeout=60, noExit=False ):
         """
         Description:
             Check the if all the current flows are in ADDED state
@@ -2433,6 +2433,9 @@
         except AssertionError:
             main.log.exception( "" )
             return None
+        except pexpect.TIMEOUT:
+            main.log.error( self.name + ": ONOS timeout" )
+            return None
         except pexpect.EOF:
             main.log.error( self.name + ": EOF exception found" )
             main.log.error( self.name + ":    " + self.handle.before )
@@ -2442,10 +2445,6 @@
             main.log.exception( self.name + ": Uncaught exception!" )
             main.cleanup()
             main.exit()
-        except pexpect.TIMEOUT:
-            main.log.error( self.name + ": ONOS timeout" )
-            return None
-
 
     def pushTestIntents( self, ingress, egress, batchSize, offset="",
                          options="", timeout=10, background = False, noExit=False, getResponse=False ):
@@ -2542,15 +2541,15 @@
                 main.cleanup()
                 main.exit()
             return None
+        except pexpect.TIMEOUT:
+            main.log.error( self.name + ": ONOS timeout" )
+            return None
         except Exception:
             main.log.exception( self.name + ": Uncaught exception!" )
             if not noExit:
                 main.cleanup()
                 main.exit()
             return None
-        except pexpect.TIMEOUT:
-            main.log.error( self.name + ": ONOS timeout" )
-            return None
 
     def getTotalIntentsNum( self, timeout=60, noExit = False ):
         """
@@ -2564,8 +2563,8 @@
         try:
             cmd = "summary -j"
             response = self.sendline( cmd, timeout=timeout, noExit=noExit )
-            if response == None:
-                return  -1
+            if response is None:
+                return -1
             response = json.loads( response )
             return int( response.get("intents") )
         except ( TypeError, ValueError ):
@@ -2880,7 +2879,7 @@
             return main.ERROR
         try:
             topology = self.getTopology( self.topology() )
-            if topology == {} or topology == None or summary == {} or summary == None:
+            if topology == {} or topology is None or summary == {} or summary is None:
                 return main.ERROR
             output = ""
             # Is the number of switches is what we expected
@@ -4167,7 +4166,7 @@
                     containsCheck = main.FALSE
                     match = matchFalse
                 else:
-                    main.log.error( self.name + " setTestGet did not match " +\
+                    main.log.error( self.name + " setTestGet did not match " +
                                     "expected output" )
                     main.log.debug( self.name + " expected: " + pattern )
                     main.log.debug( self.name + " actual: " + repr( output ) )
@@ -4253,7 +4252,6 @@
             None on error
         """
         try:
-            counters = {}
             cmdStr = "counters"
             if jsonFormat:
                 cmdStr += " -j"
@@ -4871,7 +4869,7 @@
             present in the resoponse. Otherwise, returns main.FALSE
         '''
         try:
-            cmd =  "null-link null:{} null:{} {}".format( begin, end, state )
+            cmd = "null-link null:{} null:{} {}".format( begin, end, state )
             response = self.sendline( cmd, showResponse=showResponse, timeout=timeout )
             assert response is not None, "Error in sendline"
             assert "Command not found:" not in response, response
@@ -4911,7 +4909,7 @@
         try:
             state = state.lower()
             assert state == 'enable' or state == 'disable', "Unknown state"
-            cmd =  "portstate {} {} {}".format( dpid, port, state )
+            cmd = "portstate {} {} {}".format( dpid, port, state )
             response = self.sendline( cmd, showResponse=True )
             assert response is not None, "Error in sendline"
             assert "Command not found:" not in response, response
@@ -4945,7 +4943,7 @@
         Level defaults to INFO
         """
         try:
-            self.handle.sendline( "log:set %s %s" %( level, app ) )
+            self.handle.sendline( "log:set %s %s" % ( level, app ) )
             self.handle.expect( "onos>" )
 
             response = self.handle.before
@@ -4999,24 +4997,24 @@
                 nodeA = link[ 'src' ][ 'device' ]
                 nodeB = link[ 'dst' ][ 'device' ]
                 assert idToDevice[ nodeA ][ 'available' ] and idToDevice[ nodeB ][ 'available' ]
-                if not nodeA in graphDict.keys():
-                    graphDict[ nodeA ] = { 'edges':{},
-                                           'dpid':idToDevice[ nodeA ][ 'id' ][3:],
-                                           'type':idToDevice[ nodeA ][ 'type' ],
-                                           'available':idToDevice[ nodeA ][ 'available' ],
-                                           'role':idToDevice[ nodeA ][ 'role' ],
-                                           'mfr':idToDevice[ nodeA ][ 'mfr' ],
-                                           'hw':idToDevice[ nodeA ][ 'hw' ],
-                                           'sw':idToDevice[ nodeA ][ 'sw' ],
-                                           'serial':idToDevice[ nodeA ][ 'serial' ],
-                                           'chassisId':idToDevice[ nodeA ][ 'chassisId' ],
-                                           'annotations':idToDevice[ nodeA ][ 'annotations' ]}
+                if nodeA not in graphDict.keys():
+                    graphDict[ nodeA ] = { 'edges': {},
+                                           'dpid': idToDevice[ nodeA ][ 'id' ][3:],
+                                           'type': idToDevice[ nodeA ][ 'type' ],
+                                           'available': idToDevice[ nodeA ][ 'available' ],
+                                           'role': idToDevice[ nodeA ][ 'role' ],
+                                           'mfr': idToDevice[ nodeA ][ 'mfr' ],
+                                           'hw': idToDevice[ nodeA ][ 'hw' ],
+                                           'sw': idToDevice[ nodeA ][ 'sw' ],
+                                           'serial': idToDevice[ nodeA ][ 'serial' ],
+                                           'chassisId': idToDevice[ nodeA ][ 'chassisId' ],
+                                           'annotations': idToDevice[ nodeA ][ 'annotations' ]}
                 else:
                     # Assert nodeB is not connected to any current links of nodeA
                     assert nodeB not in graphDict[ nodeA ][ 'edges' ].keys()
-                graphDict[ nodeA ][ 'edges' ][ nodeB ] = { 'port':link[ 'src' ][ 'port' ],
-                                                           'type':link[ 'type' ],
-                                                           'state':link[ 'state' ] }
+                graphDict[ nodeA ][ 'edges' ][ nodeB ] = { 'port': link[ 'src' ][ 'port' ],
+                                                           'type': link[ 'type' ],
+                                                           'state': link[ 'state' ] }
             return graphDict
         except ( TypeError, ValueError ):
             main.log.exception( self.name + ": Object not as expected" )
@@ -5097,29 +5095,31 @@
         """
         try:
             assert type( searchTerm ) is str
-            #Build the log paths string
+            # Build the log paths string
             logPath = '/opt/onos/log/karaf.log.'
             logPaths = '/opt/onos/log/karaf.log'
             for i in range( 1, logNum ):
                 logPaths = logPath + str( i ) + " " + logPaths
             cmd = "cat " + logPaths
+            if startLine:
+                # 100000000 is just an extremely large number to make sure this function can grep all the lines after startLine
+                cmd = cmd + " | grep -A 100000000 \'" + startLine + "\'"
             if mode == 'all':
                 cmd = cmd + " | grep \'" + searchTerm + "\'"
-            if mode == 'last':
+            elif mode == 'last':
                 cmd = cmd + " | grep \'" + searchTerm + "\'" + " | tail -n 1"
-            if mode == 'first':
-                if startLine != '':
-                    # 100000000 is just a extreme large number to make sure this function can grep all the lines after startLine
-                    cmd = cmd + " | grep -A 100000000 \'" + startLine + "\' | grep \'" + searchTerm + "\'" + "| head -n 1"
-                else:
-                    cmd = cmd + " | grep \'" + searchTerm + "\'" + " | head -n 1"
-            if mode == 'num':
+            elif mode == 'first':
+                cmd = cmd + " | grep \'" + searchTerm + "\'" + " | head -n 1"
+            elif mode == 'num':
                 cmd = cmd + " | grep -c \'" + searchTerm + "\'"
                 num = self.sendline( cmd )
                 return num
-            if mode == 'total':
+            elif mode == 'total':
                 totalLines = self.sendline( "cat /opt/onos/log/karaf.log | wc -l" )
                 return int(totalLines)
+            else:
+                main.log.error( self.name + " unsupported mode" )
+                return main.ERROR
             before = self.sendline( cmd )
             before = before.splitlines()
             # make sure the returned list only contains the search term
@@ -5461,7 +5461,7 @@
         try:
             cmdStr = "interfaces"
             if jsonFormat:
-                #raise NotImplementedError
+                raise NotImplementedError
                 cmdStr += " -j"
             handle = self.sendline( cmdStr )
             assert handle is not None, "Error in sendline"
@@ -5499,10 +5499,7 @@
                 and the splitTerm_after is "x"
 
             others:
-
-                plz look at the "logsearch" Function in onosclidriver.py
-
-
+                Please look at the "logsearch" Function in onosclidriver.py
         '''
         if logNum < 0:
             main.log.error("Get wrong log number ")
@@ -5524,3 +5521,240 @@
         except AssertionError:
             main.log.warn( "Search Term Not Found " )
             return main.ERROR
+
+    def workQueueAdd( self, queueName, value ):
+        """
+        CLI command to add a string to the specified Work Queue.
+        This function uses the distributed primitives test app, which
+        gives some cli access to distributed primitives for testing
+        purposes only.
+
+        Required arguments:
+            queueName - The name of the queue to add to
+            value - The value to add to the queue
+        returns:
+            main.TRUE on success, main.FALSE on failure and
+            main.ERROR on error.
+        """
+        try:
+            queueName = str( queueName )
+            value = str( value )
+            prefix = "work-queue-test"
+            operation = "add"
+            cmdStr = " ".join( [ prefix, queueName, operation, value ] )
+            output = self.distPrimitivesSend( cmdStr )
+            if "Invalid operation name" in output:
+                main.log.warn( output )
+                return main.ERROR
+            elif "Done" in output:
+                return main.TRUE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def workQueueAddMultiple( self, queueName, value1, value2 ):
+        """
+        CLI command to add two strings to the specified Work Queue.
+        This function uses the distributed primitives test app, which
+        gives some cli access to distributed primitives for testing
+        purposes only.
+
+        Required arguments:
+            queueName - The name of the queue to add to
+            value1 - The first value to add to the queue
+            value2 - The second value to add to the queue
+        returns:
+            main.TRUE on success, main.FALSE on failure and
+            main.ERROR on error.
+        """
+        try:
+            queueName = str( queueName )
+            value1 = str( value1 )
+            value2 = str( value2 )
+            prefix = "work-queue-test"
+            operation = "addMultiple"
+            cmdStr = " ".join( [ prefix, queueName, operation, value1, value2 ] )
+            output = self.distPrimitivesSend( cmdStr )
+            if "Invalid operation name" in output:
+                main.log.warn( output )
+                return main.ERROR
+            elif "Done" in output:
+                return main.TRUE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def workQueueTakeAndComplete( self, queueName, number=1 ):
+        """
+        CLI command to take a value from the specified Work Queue and complete it.
+        This function uses the distributed primitives test app, which
+        gives some cli access to distributed primitives for testing
+        purposes only.
+
+        Required arguments:
+            queueName - The name of the queue to take from
+            number - The number of items to take and complete
+        returns:
+            main.TRUE on success, main.FALSE on failure and
+            main.ERROR on error.
+        """
+        try:
+            queueName = str( queueName )
+            number = str( int( number ) )
+            prefix = "work-queue-test"
+            operation = "takeAndComplete"
+            cmdStr = " ".join( [ prefix, queueName, operation, number ] )
+            output = self.distPrimitivesSend( cmdStr )
+            if "Invalid operation name" in output:
+                main.log.warn( output )
+                return main.ERROR
+            elif "Done" in output:
+                return main.TRUE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def workQueueDestroy( self, queueName ):
+        """
+        CLI command to destroy the specified Work Queue.
+        This function uses the distributed primitives test app, which
+        gives some cli access to distributed primitives for testing
+        purposes only.
+
+        Required arguments:
+            queueName - The name of the queue to destroy
+        returns:
+            main.TRUE on success, main.FALSE on failure and
+            main.ERROR on error.
+        """
+        try:
+            queueName = str( queueName )
+            prefix = "work-queue-test"
+            operation = "destroy"
+            cmdStr = " ".join( [ prefix, queueName, operation ] )
+            output = self.distPrimitivesSend( cmdStr )
+            if "Invalid operation name" in output:
+                main.log.warn( output )
+                return main.ERROR
+            return main.TRUE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def workQueueTotalPending( self, queueName ):
+        """
+        CLI command to get the Total Pending items of the specified Work Queue.
+        This function uses the distributed primitives test app, which
+        gives some cli access to distributed primitives for testing
+        purposes only.
+
+        Required arguments:
+            queueName - The name of the queue to check
+        returns:
+            The number of Pending items in the specified work queue or
+            None on error
+        """
+        try:
+            queueName = str( queueName )
+            prefix = "work-queue-test"
+            operation = "totalPending"
+            cmdStr = " ".join( [ prefix, queueName, operation ] )
+            output = self.distPrimitivesSend( cmdStr )
+            pattern = r'\d+'
+            if "Invalid operation name" in output:
+                main.log.warn( output )
+                return None
+            else:
+                match = re.search( pattern, output )
+                return match.group(0)
+        except ( AttributeError, TypeError ):
+            main.log.exception( self.name + ": Object not as expected; " + str( output ) )
+            return None
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def workQueueTotalCompleted( self, queueName ):
+        """
+        CLI command to get the Total Completed items of the specified Work Queue.
+        This function uses the distributed primitives test app, which
+        gives some cli access to distributed primitives for testing
+        purposes only.
+
+        Required arguments:
+            queueName - The name of the queue to check
+        returns:
+            The number of complete items in the specified work queue or
+            None on error
+        """
+        try:
+            queueName = str( queueName )
+            prefix = "work-queue-test"
+            operation = "totalCompleted"
+            cmdStr = " ".join( [ prefix, queueName, operation ] )
+            output = self.distPrimitivesSend( cmdStr )
+            pattern = r'\d+'
+            if "Invalid operation name" in output:
+                main.log.warn( output )
+                return None
+            else:
+                match = re.search( pattern, output )
+                return match.group(0)
+        except ( AttributeError, TypeError ):
+            main.log.exception( self.name + ": Object not as expected; " + str( output ) )
+            return None
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def workQueueTotalInProgress( self, queueName ):
+        """
+        CLI command to get the Total In Progress items of the specified Work Queue.
+        This function uses the distributed primitives test app, which
+        gives some cli access to distributed primitives for testing
+        purposes only.
+
+        Required arguments:
+            queueName - The name of the queue to check
+        returns:
+            The number of In Progress items in the specified work queue or
+            None on error
+        """
+        try:
+            queueName = str( queueName )
+            prefix = "work-queue-test"
+            operation = "totalInProgress"
+            cmdStr = " ".join( [ prefix, queueName, operation ] )
+            output = self.distPrimitivesSend( cmdStr )
+            pattern = r'\d+'
+            if "Invalid operation name" in output:
+                main.log.warn( output )
+                return None
+            else:
+                match = re.search( pattern, output )
+                return match.group(0)
+        except ( AttributeError, TypeError ):
+            main.log.exception( self.name + ": Object not as expected; " + str( output ) )
+            return None
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
diff --git a/TestON/tests/HA/dependencies/HA.py b/TestON/tests/HA/dependencies/HA.py
index b6c6f26..7df3e51 100644
--- a/TestON/tests/HA/dependencies/HA.py
+++ b/TestON/tests/HA/dependencies/HA.py
@@ -109,7 +109,7 @@
     def consistentLeaderboards( self, nodes ):
         TOPIC = 'org.onosproject.election'
         # FIXME: use threads
-        #FIXME: should we retry outside the function?
+        # FIXME: should we retry outside the function?
         for n in range( 5 ):  # Retry in case election is still happening
             leaderList = []
             # Get all leaderboards
@@ -160,11 +160,73 @@
             results = results and currentResult
         return results
 
+    def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
+        # Completed
+        threads = []
+        completedValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].workQueueTotalCompleted,
+                             name="WorkQueueCompleted-" + str( i ),
+                             args=[ workQueueName ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            completedValues.append( int( t.result ) )
+        # Check the results
+        completedResults = [ x == completed for x in completedValues ]
+        completedResult = all( completedResults )
+        if not completedResult:
+            main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
+                workQueueName, completed, completedValues ) )
+
+        # In Progress
+        threads = []
+        inProgressValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].workQueueTotalInProgress,
+                             name="WorkQueueInProgress-" + str( i ),
+                             args=[ workQueueName ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            inProgressValues.append( int( t.result ) )
+        # Check the results
+        inProgressResults = [ x == inProgress for x in inProgressValues ]
+        inProgressResult = all( inProgressResults )
+        if not inProgressResult:
+            main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
+                workQueueName, inProgress, inProgressValues ) )
+
+        # Pending
+        threads = []
+        pendingValues = []
+        for i in main.activeNodes:
+            t = main.Thread( target=main.CLIs[i].workQueueTotalPending,
+                             name="WorkQueuePending-" + str( i ),
+                             args=[ workQueueName ] )
+            threads.append( t )
+            t.start()
+
+        for t in threads:
+            t.join()
+            pendingValues.append( int( t.result ) )
+        # Check the results
+        pendingResults = [ x == pending for x in pendingValues ]
+        pendingResult = all( pendingResults )
+        if not pendingResult:
+            main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
+                workQueueName, pending, pendingValues ) )
+        return completedResult and inProgressResult and pendingResult
+
     def CASE17( self, main ):
         """
         Check for basic functionality with distributed primitives
         """
-        #TODO: Clean this up so it's not just a cut/paste from the test
+        # TODO: Clean this up so it's not just a cut/paste from the test
         try:
             # Make sure variables are defined/set
             assert main.numCtrls, "main.numCtrls not defined"
@@ -188,8 +250,12 @@
             addValue = "a"
             addAllValue = "a b c d e f"
             retainValue = "c d e f"
-            valueName = "TestOn-Value"
+            valueName = "TestON-Value"
             valueValue = None
+            workQueueName = "TestON-Queue"
+            workQueueCompleted = 0
+            workQueueInProgress = 0
+            workQueuePending = 0
 
             description = "Check for basic functionality with distributed " +\
                           "primitives"
@@ -1522,5 +1588,123 @@
                                      onfail="Error getting atomic Value " +
                                             str( valueValue ) + ", found: " +
                                             str( getValues ) )
+
+            # WORK QUEUES
+            main.step( "Work Queue add()" )
+            threads = []
+            i = main.activeNodes[0]
+            addResult = main.CLIs[i].workQueueAdd( workQueueName, 'foo' )
+            workQueuePending += 1
+            main.log.debug( addResult )
+            # Check the results
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=addResult,
+                                     onpass="Work Queue add successful",
+                                     onfail="Error adding to Work Queue" )
+
+            main.step( "Check the work queue stats" )
+            statsResults = self.workQueueStatsCheck( workQueueName,
+                                                     workQueueCompleted,
+                                                     workQueueInProgress,
+                                                     workQueuePending )
+            utilities.assert_equals( expect=True,
+                                     actual=statsResults,
+                                     onpass="Work Queue stats correct",
+                                     onfail="Work Queue stats incorrect " )
+
+            main.step( "Work Queue addMultiple()" )
+            threads = []
+            i = main.activeNodes[0]
+            addMultipleResult = main.CLIs[i].workQueueAddMultiple( workQueueName, 'bar', 'baz' )
+            workQueuePending += 2
+            main.log.debug( addMultipleResult )
+            # Check the results
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=addMultipleResult,
+                                     onpass="Work Queue add multiple successful",
+                                     onfail="Error adding multiple items to Work Queue" )
+
+            main.step( "Check the work queue stats" )
+            statsResults = self.workQueueStatsCheck( workQueueName,
+                                                     workQueueCompleted,
+                                                     workQueueInProgress,
+                                                     workQueuePending )
+            utilities.assert_equals( expect=True,
+                                     actual=statsResults,
+                                     onpass="Work Queue stats correct",
+                                     onfail="Work Queue stats incorrect " )
+
+            main.step( "Work Queue takeAndComplete() 1" )
+            threads = []
+            i = main.activeNodes[0]
+            number = 1
+            take1Result = main.CLIs[i].workQueueTakeAndComplete( workQueueName, number )
+            workQueuePending -= number
+            workQueueCompleted += number
+            main.log.debug( take1Result )
+            # Check the results
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=take1Result,
+                                     onpass="Work Queue takeAndComplete 1 successful",
+                                     onfail="Error taking 1 from Work Queue" )
+
+            main.step( "Check the work queue stats" )
+            statsResults = self.workQueueStatsCheck( workQueueName,
+                                                     workQueueCompleted,
+                                                     workQueueInProgress,
+                                                     workQueuePending )
+            utilities.assert_equals( expect=True,
+                                     actual=statsResults,
+                                     onpass="Work Queue stats correct",
+                                     onfail="Work Queue stats incorrect " )
+
+            main.step( "Work Queue takeAndComplete() 2" )
+            threads = []
+            i = main.activeNodes[0]
+            number = 2
+            take2Result = main.CLIs[i].workQueueTakeAndComplete( workQueueName, number )
+            workQueuePending -= number
+            workQueueCompleted += number
+            main.log.debug( take2Result )
+            # Check the results
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=take2Result,
+                                     onpass="Work Queue takeAndComplete 2 successful",
+                                     onfail="Error taking 2 from Work Queue" )
+
+            main.step( "Check the work queue stats" )
+            statsResults = self.workQueueStatsCheck( workQueueName,
+                                                     workQueueCompleted,
+                                                     workQueueInProgress,
+                                                     workQueuePending )
+            utilities.assert_equals( expect=True,
+                                     actual=statsResults,
+                                     onpass="Work Queue stats correct",
+                                     onfail="Work Queue stats incorrect " )
+
+            main.step( "Work Queue destroy()" )
+            valueValue = None
+            threads = []
+            i = main.activeNodes[0]
+            destroyResult = main.CLIs[i].workQueueDestroy( workQueueName )
+            workQueueCompleted = 0
+            workQueueInProgress = 0
+            workQueuePending = 0
+            main.log.debug( destroyResult )
+            # Check the results
+            utilities.assert_equals( expect=main.TRUE,
+                                     actual=destroyResult,
+                                     onpass="Work Queue destroy successful",
+                                     onfail="Error destroying Work Queue" )
+
+            main.step( "Check the work queue stats" )
+            statsResults = self.workQueueStatsCheck( workQueueName,
+                                                     workQueueCompleted,
+                                                     workQueueInProgress,
+                                                     workQueuePending )
+            utilities.assert_equals( expect=True,
+                                     actual=statsResults,
+                                     onpass="Work Queue stats correct",
+                                     onfail="Work Queue stats incorrect " )
         except Exception as e:
             main.log.error( "Exception: " + str( e ) )
diff --git a/TestON/tests/SCPF/Jenkinsfile b/TestON/tests/SCPF/Jenkinsfile
new file mode 100644
index 0000000..ea6f8f3
--- /dev/null
+++ b/TestON/tests/SCPF/Jenkinsfile
@@ -0,0 +1,182 @@
+#!groovy
+// This is a Jenkinsfile for a scripted pipeline for the SCPF tests
+properties([pipelineTriggers([cron('30 19 * * *')])])
+
+// TODO: Exception handling around steps
+SCPF = [
+    SCPFcbench: [ test:'SCPFcbench', table:'cbench_bm_tests', results:'cbench_bm_results', file:'CbenchDB'],
+    SCPFhostLat: [ test:'SCPFhostLat', table:'host_latency_results', results:'host_latency_results', file:'HostAddLatency'],
+    SCPFportLat: [ test:'SCPFportLat', table:'port_latency_details', results:'port_latency_results', file:'/tmp/portEventResultDb'],
+    SCPFflowTp1g: [ test:'SCPFflowTp1g', table:'flow_tp_tests', results:'flow_tp_results', file:'flowTP1gDB'],
+    SCPFscaleTopo: [ test:'SCPFscaleTopo', table:'scale_topo_latency_details', results:'scale_topo_latency_results', file:'/tmp/scaleTopoResultDb'],
+    SCPFswitchLat: [ test:'SCPFswitchLat', table:'switch_latency_details', results:'switch_latency_results', file:'/tmp/switchEventResultDb'],
+    SCPFbatchFlowResp: [ test:'SCPFbatchFlowResp', table:'batch_flow_tests', results:'batch_flow_results', file:'SCPFbatchFlowRespData'],
+    SCPFintentEventTp: [ test:'SCPFintentEventTp', table:'intent_tp_tests', results:'intent_tp_results', file:'IntentEventTPDB'],
+    SCPFintentRerouteLat: [ test:'SCPFintentRerouteLat', table:'intent_reroute_latency_tests', results:'intent_reroute_latency_results', file:'IntentRerouteLatDB'],
+    SCPFscalingMaxIntents: [ test:'SCPFscalingMaxIntents', table:'max_intents_tests', results:'max_intents_results', file:'ScalingMaxIntentDB'],
+    SCPFintentEventTpWithFlowObj: [ test:'SCPFintentEventTp --params TEST/flowObj=True', table:'intent_tp_fobj_tests', results:'intent_tp_fobj_results', file:'IntentEventTPflowObjDB'],
+    SCPFintentInstallWithdrawLat: [ test:'SCPFintentInstallWithdrawLat', table:'intent_latency_tests', results:'intent_latency_results', file:'IntentInstallWithdrawLatDB'],
+    SCPFintentRerouteLatWithFlowObj: [ test:'SCPFintentRerouteLat --params TEST/flowObj=True', table:'intent_reroute_latency_fobj_tests', results:'intent_reroute_latency_fobj_results', file:'IntentRerouteLatDBWithFlowObj'],
+    SCPFscalingMaxIntentsWithFlowObj: [ test:'SCPFscalingMaxIntents --params TEST/flowObj=True', table:'max_intents_fobj_tests', results:'max_intents_fobj_results', file:'ScalingMaxIntentDBWFO'],
+    SCPFintentInstallWithdrawLatWithFlowObj: [ test:'SCPFintentInstallWithdrawLat --params TEST/flowObj=True', table:'intent_latency_fobj_tests', results:'intent_latency_fobj_results', file:'IntentInstallWithdrawLatDBWFO']
+]
+
+// Get just the test names
+def defaultTests = SCPF.keySet().toArray()
+// Convert to a string with newline delimiters for user input
+StringBuilder sb = new StringBuilder();
+for (String s : defaultTests)
+{
+    sb.append(s);
+    sb.append("\n");
+}
+choices = sb.toString();
+// Define sets of tests
+SCPF_Basic = "SCPFswitchLat\nSCPFportLat\nSCPFintentInstallWithdrawLat\nSCPFintentEventTp\nSCPFflowTp1g\nSCPFcbench\nSCPFbatchFlowResp"
+SCPF_ExtraSetA = "SCPFintentRerouteLat\nSCPFscalingMaxIntents\nSCPFhostLat\nSCPFscaleTopo"
+SCPF_ExtraSetB = "SCPFintentInstallWithdrawLatWithFlowObj\nSCPFintentEventTpWithFlowObj\nSCPFintentRerouteLatWithFlowObj\nSCPFscalingMaxIntentsWithFlowObj"
+
+try {
+    timeout(time: 120, unit: 'SECONDS') {
+        // This is kind of hacky, I can't seem to find the correct way to get a "build with parameters" button
+        testcases = input message: 'Tests to run?', parameters:[[$class:'TextParameterDefinition', defaultValue: choices, description:'', name: 'Run these tests']]
+    }
+} catch(org.jenkinsci.plugins.workflow.steps.FlowInterruptedException Err) {
+    echo("Input timed out or cancled, using default values")
+    // Set tests based on day of week
+    def now = new Date()
+    echo(now.toString())
+    today = now[Calendar.DAY_OF_WEEK]
+    switch (today) {
+        // TODO Choose which tests to run based on day of the week for nightly run?
+        // TODO Maybe define different subsets of tests above then just add them here
+        case Calendar.MONDAY:
+            choices = SCPF_Basic + SCPF_ExtraSetA
+            println "Defaulting to Monday tests:" + choices
+            break
+        case Calendar.TUESDAY:
+            choices = SCPF_Basic + SCPF_ExtraSetB
+            println "Defaulting to Tuesday tests:" + choices
+            break
+        case Calendar.WEDNESDAY:
+            choices = SCPF_Basic
+            println "Defaulting to Wednesday tests:" + choices
+            break
+        case Calendar.THURSDAY:
+            choices = SCPF_Basic + SCPF_ExtraSetA
+            println "Defaulting to Thursday tests:" + choices
+            break
+        case Calendar.FRIDAY:
+            choices = SCPF_Basic + SCPF_ExtraSetA + SCPF_ExtraSetB
+            println "Defaulting to Fridat tests:" + choices
+            break
+        case Calendar.SATURDAY:
+            choices = SCPF_Basic + SCPF_ExtraSetA + SCPF_ExtraSetB
+            println "Defaulting to Saturday tests:" + choices
+            break
+        case Calendar.SUNDAY:
+            choices = SCPF_Basic
+            println "Defaulting to Sunday tests:" + choices
+            break
+    }
+    testcases = choices
+}
+
+echo("Testcases:")
+//println testcases
+// TODO REMOVE AFTER DEBUGGING
+def testsToRun = testcases.tokenize("\n;, ")
+for (String test : testsToRun) {
+    println test
+}
+
+def tests = [:]
+for (int i = 0; i < testsToRun.size(); i++) {
+    // Get the actual string here.
+    def testName = testsToRun[i]
+    def stepName = "Running ${testName}"
+    tests[stepName] = SCPFTest(testName)
+}
+
+// run the tests
+parallel tests
+
+// The testName should be the key from the SCPF map
+def SCPFTest( testName ) {
+    return {
+        node ("TestStation-BMs"){ // only run SCPF tests on the BM cluster
+            def prop = readProperties(file:'/var/jenkins/TestONOS.property') // TODO set defaults
+
+            withEnv(['ONOSBranch='+prop["ONOSBranch"],
+                     'ONOSJVMHeap='+prop["ONOSJVMHeap"],
+                     'TestONBranch='+prop["TestONBranch"],
+                     'ONOSTag='+prop["ONOSTag"],
+                     'WikiPrefix='+prop["WikiPrefix"]]){
+                stage(testName) {
+                    sh '''#!/bin/bash -l
+                    set -i # interactive
+                    shopt -s expand_aliases # expand alias in non-interactive mode
+                    export PYTHONUNBUFFERED=1
+
+                    ifconfig
+
+                    echo "ONOS Branch is: $ONOSBranch"
+                    echo "TestON Branch is: $TestONBranch"
+                    echo "Test date: "
+                    date
+
+                    cd ~
+                    export PATH=$PATH:onos/tools/test/bin
+
+                    stc teardown
+
+                    cd ~/OnosSystemTest/TestON/bin
+                    git log |head
+                    ./cleanup.sh
+                    '''
+                    sh "./cli.py run " + SCPF[testName]['test']
+
+                    // Post Results
+                    withCredentials([
+                        string(credentialsId: 'db_pass', variable: 'pass'),
+                        string(credentialsId: 'db_user', variable: 'user'),
+                        string(credentialsId: 'db_host', variable: 'host'),
+                        string(credentialsId: 'db_port', variable: 'port')]) {
+                            sh '''#!/bin/bash
+
+                            export DATE=\$(date +%F_%T)
+                            cd ~
+                            pwd
+                            cd /tmp
+                            while read line
+                            do
+
+                            echo \$line
+                            echo ''' + pass + "|psql --host=" + host + " --port=" + port + " --username=" + user + " --password --dbname onostest -c \"INSERT INTO " + SCPF[testName]['table'] + " VALUES('\$DATE','" + SCPF[testName]['results'] + '''','\$BUILD_NUMBER', '\$ONOSBranch', \$line );\"
+
+                            done< ''' + SCPF[testName]['file'] + '''
+                            echo 'Done'
+                            '''
+                    }
+                    // Fetch Logs
+                    sh '''#!/bin/bash
+
+                    cd ~/OnosSystemTest/TestON/logs
+                    echo "Job Name is: ${JOB_NAME}"
+                    TestONlogDir=$(ls -t | grep ${TEST_NAME}_  |head -1)
+                    echo "########################################################################################"
+                    echo "#####  copying ONOS logs from all nodes to TestON/logs directory: ${TestONlogDir}"
+                    echo "########################################################################################"
+                    cd $TestONlogDir
+                    if [ $? -eq 1 ]
+                    then
+                        echo "Job name does not match any test suite name to move log!"
+                    else
+                        pwd
+                        for i in $OC{1..7}; do onos-fetch-logs $i; done
+                    fi'''
+                }
+            }
+        }
+    }
+}
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
index c3c0ff6..64e0ab5 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.params
@@ -7,7 +7,7 @@
 
     <ENV>
         <cellName>intentRerouteCell</cellName>
-        <cellApps>drivers,null,intentperf,metrics</cellApps>
+        <cellApps>drivers,null,metrics</cellApps>
     </ENV>
 
     <DEPENDENCY>
@@ -60,7 +60,7 @@
         <startup>5</startup>
         <setmaster>5</setmaster>
         <install>10</install>
-        <verify>5</verify>
+        <verify>10</verify>
         # timeout for pexpect
         <timeout>300</timeout>
     </SLEEP>
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
index ac0a673..ae0af63 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/SCPFintentRerouteLat.py
@@ -248,7 +248,8 @@
         main.CLIs[0].setCfg("org.onosproject.provider.nil.NullProviders", "deviceCount", value=main.deviceCount)
         main.CLIs[0].setCfg("org.onosproject.provider.nil.NullProviders", "topoShape", value="reroute")
         main.CLIs[0].setCfg("org.onosproject.provider.nil.NullProviders", "enabled", value="true")
-        main.CLIs[0].setCfg("org.onosproject.store.flow.impl.DistributedFlowRuleStore", "backupEnabled", value="false")
+        # "backupEnabled" should be true by default. Not sure whether disabling it is intentional; it seems to have no impact on test results.
+        #main.CLIs[0].setCfg("org.onosproject.store.flow.impl.DistributedFlowRuleStore", "backupEnabled", value="false")
         if main.flowObj:
             main.CLIs[0].setCfg("org.onosproject.net.intent.impl.compiler.IntentConfigurableRegistrator",
                                 "useFlowObjectives", value="true")
@@ -271,24 +272,21 @@
         import json
         # from scipy import stats
 
-        ts = time.time()
         print(main.intentsList)
         for batchSize in main.intentsList:
             main.batchSize = batchSize
             main.log.report("Intent Batch size: " + str(batchSize) + "\n      ")
-            main.LatencyList = []
-            main.LatencyListTopoToFirstInstalled = []
-            main.LatencyListFirstInstalledToLastInstalled = []
+            firstLocalLatencies = []
+            lastLocalLatencies = []
+            firstGlobalLatencies = []
+            lastGlobalLatencies = []
+            main.startLine = {}
             main.validRun = 0
             main.invalidRun = 0
-            # initial a variables to record the term of startLine in karaf logs of each node
-            main.totalLines = []
-            for i in range( main.numCtrls ):
-                main.totalLines.append( '' )
             while main.validRun <= main.warmUp + main.sampleSize and main.invalidRun <= main.maxInvalidRun:
                 if main.validRun >= main.warmUp:
                     main.log.info("================================================")
-                    main.log.info("Starting test iteration: {} ".format( main.validRun - main.warmUp))
+                    main.log.info("Valid iteration: {} ".format( main.validRun - main.warmUp))
                     main.log.info("Total iteration: {}".format( main.validRun + main.invalidRun))
                     main.log.info("================================================")
                 else:
@@ -298,140 +296,112 @@
                 main.CLIs[0].pushTestIntents(main.ingress, main.egress, main.batchSize,
                                              offset=1, options="-i", timeout=main.timeout)
 
-                # check links and flows
-                k = 0
-                main.verify = main.FALSE
-                linkCheck = 0
-                flowsCheck = 0
-                while k <= main.verifyAttempts:
-                    time.sleep(main.verifySleep)
-                    summary = json.loads(main.CLIs[0].summary(timeout=main.timeout))
-                    linkCheck = summary.get("links")
-                    flowsCheck = summary.get("flows")
-                    if linkCheck == main.deviceCount * 2 and flowsCheck == batchSize * (main.deviceCount - 1 ):
-                        main.log.info("links: {}, flows: {} ".format(linkCheck, flowsCheck))
-                        main.verify = main.TRUE
-                        break
-                    k += 1
+                # check links, flows and intents
+                main.intentRerouteLatFuncs.sanityCheck( main, main.deviceCount * 2, batchSize * (main.deviceCount - 1 ), main.batchSize )
                 if not main.verify:
-                    main.log.warn("Links or flows number not as expected")
-                    main.log.warn("links: {}, flows: {} ".format(linkCheck, flowsCheck))
-                    # bring back topology
-                    main.intentRerouteLatFuncs.bringBackTopology( main )
-                    if main.validRun >= main.warmUp:
-                        main.invalidRun += 1
-                        continue
-                    else:
-                        main.validRun += 1
-                        continue
-                # Bring link down
+                    main.log.warn( "Sanity check failed, skipping this iteration..." )
+                    continue
+
+                # Insert one line in karaf.log before link down
+                for i in range( main.numCtrls ):
+                    main.CLIs[ i ].log( "\'Scale: {}, Batch:{}, Iteration: {}\'".format( main.numCtrls, batchSize, main.validRun + main.invalidRun ) )
+
+                # bring link down
                 main.CLIs[0].link( main.end1[ 'port' ], main.end2[ 'port' ], "down",
                                   timeout=main.timeout, showResponse=False)
-                main.verify = main.FALSE
-                k = 0
-                while k <= main.verifyAttempts:
-                    time.sleep(main.verifySleep)
-                    summary = json.loads(main.CLIs[0].summary(timeout=main.timeout))
-                    linkCheck = summary.get("links")
-                    flowsCheck = summary.get("flows")
-                    if linkCheck == (main.deviceCount - 1) * 2 and flowsCheck == batchSize * main.deviceCount:
-                        main.log.info("links: {}, flows: {} ".format(linkCheck, flowsCheck))
-                        main.verify = main.TRUE
-                        break
-                    k += 1
+
+                # check links, flows and intents
+                main.intentRerouteLatFuncs.sanityCheck( main, (main.deviceCount - 1) * 2, batchSize * main.deviceCount, main.batchSize )
                 if not main.verify:
-                    main.log.warn("Links or flows number not as expected")
-                    main.log.warn("links: {}, flows: {} ".format(linkCheck, flowsCheck))
-                    # bring back topology
-                    main.intentRerouteLatFuncs.bringBackTopology( main )
-                    if main.validRun >= main.warmUp:
-                        main.invalidRun += 1
-                        continue
-                    else:
-                        main.validRun += 1
-                        continue
-                # record the link romving time as the startLine
+                    main.log.warn( "Sanity check failed, skipping this iteration..." )
+                    continue
+
+                # Get timestamp of last LINK_REMOVED event as separator between iterations
+                skip = False
                 for i in range( main.numCtrls ):
-                    logNum = main.intentRerouteLatFuncs.checkLog( main, i )
-                    main.totalLines[i] = str(main.CLIs[ i ].getTimeStampFromLog( "last", "LINK_REMOVED", "time = ", " ", logNum=logNum ))
-                    main.log.info("Node " + str( i+1 ) + ": the start timestamp is " + main.totalLines[i] + " this iteration" )
-                #Calculate values
-                lastTopologyToFirstInstalled, firstInstalledToLastInstalled, totalTime = main.intentRerouteLatFuncs.getValues( main )
-                if totalTime == -1:
+                    logNum = main.intentRerouteLatFuncs.getLogNum( main, i )
+                    timestamp = str( main.CLIs[ i ].getTimeStampFromLog( "last", "LINK_REMOVED", "time = ", " ", logNum=logNum ) )
+                    if timestamp == main.ERROR:
+                        # Try again in case that the log number just increased
+                        logNum = main.intentRerouteLatFuncs.getLogNum( main, i )
+                        timestamp = str( main.CLIs[ i ].getTimeStampFromLog( "last", "LINK_REMOVED", "time = ", " ", logNum=logNum ) )
+                    if timestamp == main.ERROR:
+                        main.log.warn( "Cannot find the event we want in the log, skipping this iteration..." )
+                        main.intentRerouteLatFuncs.bringBackTopology( main )
+                        if main.validRun >= main.warmUp:
+                            main.invalidRun += 1
+                        else:
+                            main.validRun += 1
+                        skip = True
+                        break
+                    else:
+                        main.startLine[ i ] = timestamp
+                        main.log.info( "Timestamp of last LINK_REMOVED event on node {} is {}".format( i+1, main.startLine[ i ] ) )
+                if skip: continue
+
+                # calculate values
+                topologyTimestamps = main.intentRerouteLatFuncs.getTopologyTimestamps( main )
+                intentTimestamps = main.intentRerouteLatFuncs.getIntentTimestamps( main )
+                if intentTimestamps == main.ERROR or topologyTimestamps == main.ERROR:
+                    main.log.info( "Got invalid timestamp, skipping this iteration..." )
+                    main.intentRerouteLatFuncs.bringBackTopology( main )
                     if main.validRun >= main.warmUp:
                         main.invalidRun += 1
                     else:
                         main.validRun += 1
                     continue
                 else:
-                    main.log.info("Get valid latency")
+                    main.log.info( "Got valid timestamps" )
+
+                firstLocalLatnecy, lastLocalLatnecy, firstGlobalLatency, lastGlobalLatnecy = main.intentRerouteLatFuncs.calculateLatency( main, topologyTimestamps, intentTimestamps )
+                if firstLocalLatnecy < 0:
+                    main.log.info( "Got negative latency, skipping this iteration..." )
+                    main.intentRerouteLatFuncs.bringBackTopology( main )
+                    if main.validRun >= main.warmUp:
+                        main.invalidRun += 1
+                    else:
+                        main.validRun += 1
+                    continue
+                else:
+                    main.log.info( "Got valid latencies" )
                     main.validRun += 1
 
-                # Verify Summary after we bring up link, and withdrawn intents
+                firstLocalLatencies.append( firstLocalLatnecy )
+                lastLocalLatencies.append( lastLocalLatnecy )
+                firstGlobalLatencies.append( firstGlobalLatency )
+                lastGlobalLatencies.append( lastGlobalLatnecy )
+
+                # bring up link and withdraw intents
                 main.CLIs[0].link( main.end1[ 'port' ], main.end2[ 'port' ], "up",
                                   timeout=main.timeout)
-                k = 0
-                main.verify = main.FALSE
-                linkCheck = 0
-                flowsCheck = 0
-                while k <= main.verifyAttempts:
-                    time.sleep(main.verifySleep)
-                    main.CLIs[0].pushTestIntents(main.ingress, main.egress, batchSize,
-                                                 offset=1, options="-w", timeout=main.timeout)
-                    main.CLIs[0].purgeWithdrawnIntents()
-                    summary = json.loads(main.CLIs[0].summary())
-                    linkCheck = summary.get("links")
-                    flowsCheck = summary.get("flows")
-                    intentCheck = summary.get("intents")
-                    if linkCheck == main.deviceCount * 2 and flowsCheck == 0 and intentCheck == 0:
-                        main.log.info("links: {}, flows: {}, intents: {} ".format(linkCheck, flowsCheck, intentCheck))
-                        main.verify = main.TRUE
-                        break
-                    k += 1
+                main.CLIs[0].pushTestIntents(main.ingress, main.egress, batchSize,
+                                             offset=1, options="-w", timeout=main.timeout)
+                main.CLIs[0].purgeWithdrawnIntents()
+
+                # check links, flows and intents
+                main.intentRerouteLatFuncs.sanityCheck( main, main.deviceCount * 2, 0, 0 )
                 if not main.verify:
-                    main.log.error("links, flows or intents number not as expected")
-                    main.log.info("links: {}, flows: {}, intents: {} ".format(linkCheck, flowsCheck, intentCheck))
-                    # bring back topology
-                    main.intentRerouteLatFuncs.bringBackTopology( main )
                     continue
-                #main.log.info("total negative results num: " + str( main.record ) )
 
-            aveLatency = 0
-            aveLatencyTopoToFirstInstalled = 0
-            aveLatencyFirstInstalledToLastInstalled = 0
-
-            stdLatency = 0
-            stdLatencyTopoToFirstInstalled = 0
-            stdLatencyFirstInstalledToLastInstalled = 0
-
-            aveLatency = numpy.average( main.LatencyList )
-            aveLatencyTopoToFirstInstalled = numpy.average( main.LatencyListTopoToFirstInstalled )
-            aveLatencyFirstInstalledToLastInstalled = numpy.average( main.LatencyListFirstInstalledToLastInstalled )
-
-            stdLatency = numpy.std( main.LatencyList )
-            stdLatencyTopoToFirstInstalled = numpy.std( main.LatencyListTopoToFirstInstalled )
-            stdLatencyFirstInstalledToLastInstalled = numpy.std( main.LatencyListFirstInstalledToLastInstalled )
+            aveLocalLatency = numpy.average( lastLocalLatencies )
+            aveGlobalLatency = numpy.average( lastGlobalLatencies )
+            stdLocalLatency = numpy.std( lastLocalLatencies )
+            stdGlobalLatency = numpy.std( lastGlobalLatencies )
 
             main.log.report( "Scale: " + str( main.numCtrls ) + "  \tIntent batch: " + str( batchSize ) )
-            main.log.report( "Total Latency average:................" + str( aveLatency ) )
-            main.log.report( "Latency standard deviation:..........." + str( stdLatency ) )
-            main.log.report( "Last Topology to first installed Latency average:................." + str( aveLatencyTopoToFirstInstalled ) )
-            main.log.report( "Last Topology to first installed Latency standard deviation:......" + str( stdLatencyTopoToFirstInstalled ) )
-            main.log.report( "First installed to last installed Latency average:................" + str( aveLatencyFirstInstalledToLastInstalled ) )
-            main.log.report( "First installed to last installed Latency standard deviation:....." + str( stdLatencyFirstInstalledToLastInstalled ) )
+            main.log.report( "Local latency average:................" + str( aveLocalLatency ) )
+            main.log.report( "Global latency average:................" + str( aveGlobalLatency ) )
+            main.log.report( "Local latency std:................" + str( stdLocalLatency ) )
+            main.log.report( "Global latency std:................" + str( stdGlobalLatency ) )
             main.log.report( "________________________________________________________" )
 
-            if not (numpy.isnan(aveLatency) or numpy.isnan(stdLatency)):
+            if not ( numpy.isnan( aveLocalLatency ) or numpy.isnan( aveGlobalLatency ) ):
                 # check if got NaN for result
-                resultsDB = open(main.dbFileName, "a")
-                resultsDB.write("'" + main.commit + "',")
-                resultsDB.write(str(main.numCtrls) + ",")
-                resultsDB.write(str(batchSize) + ",")
-                resultsDB.write(str(aveLatency) + ",")
-                resultsDB.write(str(stdLatency) + ",")
-                resultsDB.write(str(aveLatencyTopoToFirstInstalled) + ",")
-                resultsDB.write(str(stdLatencyTopoToFirstInstalled) + ",")
-                resultsDB.write(str(aveLatencyFirstInstalledToLastInstalled) + ",")
-                resultsDB.write(str(stdLatencyFirstInstalledToLastInstalled) + "\n")
+                resultsDB = open( main.dbFileName, "a" )
+                resultsDB.write( "'" + main.commit + "'," )
+                resultsDB.write( str( main.numCtrls ) + "," )
+                resultsDB.write( str( batchSize ) + "," )
+                resultsDB.write( str( aveLocalLatency ) + "," )
+                resultsDB.write( str( stdLocalLatency ) + "\n" )
                 resultsDB.close()
-        del main.scale[0]
+        del main.scale[ 0 ]
diff --git a/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
index 24cf32b..1f830ad 100644
--- a/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
+++ b/TestON/tests/SCPF/SCPFintentRerouteLat/dependencies/intentRerouteLatFuncs.py
@@ -4,20 +4,40 @@
 '''
 import numpy
 import time
+import json
 
 def _init_( self ):
     self.default = ''
 
-def checkLog( main, nodeId ):
-    try:
-        logNames = main.ONOSbench.listLog( main.onosIp[ nodeId ] )
-        assert logNames is not None
-        if len( logNames ) >= 2:
-            return 2
-        return 1
-    except AssertionError:
-        main.log.error("There is no karaf log")
-        return -1
+def sanityCheck( main, linkNumExpected, flowNumExpected, intentNumExpected ):
+    '''
+    Sanity check on numbers of links, flows and intents in ONOS
+    '''
+    attemps = 0
+    main.verify = main.FALSE
+    linkNum = 0
+    flowNum = 0
+    intentNum = 0
+    while attemps <= main.verifyAttempts:
+        time.sleep(main.verifySleep)
+        summary = json.loads( main.CLIs[0].summary( timeout=main.timeout ) )
+        linkNum = summary.get("links")
+        flowNum = summary.get("flows")
+        intentNum = summary.get("intents")
+        if linkNum == linkNumExpected and flowNum == flowNumExpected and intentNum == intentNumExpected:
+            main.log.info("links: {}, flows: {}, intents: {}".format(linkNum, flowNum, intentNum))
+            main.verify = main.TRUE
+            break
+        attemps += 1
+    if not main.verify:
+        main.log.warn("Links or flows or intents number not as expected")
+        main.log.warn("links: {}, flows: {}, intents: {}".format(linkNum, flowNum, intentNum))
+        # bring back topology
+        bringBackTopology( main )
+        if main.validRun >= main.warmUp:
+            main.invalidRun += 1
+        else:
+            main.validRun += 1
 
 def bringBackTopology( main ):
     main.log.info( "Bring back topology " )
@@ -35,75 +55,86 @@
         main.CLIs[ 0 ].deviceRole(main.end2[ 'name' ], main.ONOSip[ 0 ])
     time.sleep( main.setMasterSleep )
 
-def getValues( main ):
+def getLogNum( main, nodeId ):
     '''
-    Calculated the wanted values for intentRerouteTest
-
-    1. Get the first "last topology timestamp" from karaf.log in different node
-    2. Get the first "first intent installed timestamp" from  karaf log in different node
-    3. Get the last "last intent installed timestamp" from karaf log in different node
-
-    Return:
-        last_topology_to_first_installed: The time from the last topology to the first intent installed
-        first_installed_to_last_installed: Time time from the first topology to the last intent installed
-        totalTime: The time from the last topology to the last intent installed
-
+    Return the number of karaf log files
     '''
-    lastTopologyTimestamp = compareTimestamp( main, main.searchTerm[ "TopologyTime" ], "creationTime=", ",",  'last',func='min' )
-    firstIntentInstalledTimestamp = compareTimestamp( main, main.searchTerm[ "InstallTime" ], "time = ", " ",  'first',func='min' )
-    lastIntentInstalledTimestamp = compareTimestamp( main, main.searchTerm[ "InstallTime" ], "time = ", " ",  'last',func='max' )
-
-    if lastTopologyTimestamp == -1 or firstIntentInstalledTimestamp == -1 or lastIntentInstalledTimestamp == -1:
-        main.log.warn( "Can't get timestamp from karaf log! " )
-        bringBackTopology( main )
-        return -1, -1, -1
-
-    #calculate values
-    lastTopologyToFirstInstalled = firstIntentInstalledTimestamp - lastTopologyTimestamp
-    if lastTopologyToFirstInstalled < 0:
-        main.record = main.record + 1
-
-    firstInstalledToLastInstalled = lastIntentInstalledTimestamp - firstIntentInstalledTimestamp
-    totalTime = lastIntentInstalledTimestamp - lastTopologyTimestamp
-
-    if main.validRun >= main.warmUp and main.verify:
-        main.log.info( "Last topology time stamp: {0:f}".format( lastTopologyTimestamp ))
-        main.log.info( "First installed time stamp: {0:f}".format( firstIntentInstalledTimestamp ))
-        main.log.info( "Last installed time stamp: {0:f}".format( lastIntentInstalledTimestamp ))
-        main.log.info( "Last topology to first installed latency:{0:f}".format( lastTopologyToFirstInstalled ))
-        main.log.info( "First installed to last installed latency:{0:f}".format( firstInstalledToLastInstalled ))
-        main.log.info( "Overall latency:{0:f}".format( totalTime ))
-        main.LatencyList.append( totalTime )
-        main.LatencyListTopoToFirstInstalled.append( lastTopologyToFirstInstalled )
-        main.LatencyListFirstInstalledToLastInstalled.append( firstInstalledToLastInstalled )
-    return lastTopologyToFirstInstalled, firstInstalledToLastInstalled, totalTime
-
-def compareTimestamp( main, compareTerm, splitTerm_before, splitTerm_after, mode, func='max' ):
-    '''
-    Compare all the timestamps of compareTerm from different node.
-
-    func:
-        max: Compare which one is the biggest and retun it
-        min: Compare which one is the smallest and return it
-
-    return:
-        This function will return the biggest or smallest timestamps of the compareTerm.
-
-    '''
-    compareTermList = []
-    for i in range( main.numCtrls ):
-        timestamp = main.CLIs[ i ].getTimeStampFromLog( mode, compareTerm, splitTerm_before, splitTerm_after, startLine=main.totalLines[ i ], logNum=checkLog( main, i ) )
-        compareTermList.append( timestamp )
-    main.log.info("-----------------------------------------------")
-    for i in range( main.numCtrls ):
-        main.log.info( "ONOS Node {} {} {} time stamp: {}".format((i+1), mode, compareTerm, compareTermList[ i ]))
-    x = min( compareTermList )
-    main.log.info("-----------------------------------------------")
-    if x == -1:
-        main.log.warn( "Can't compare timestamps" )
+    try:
+        logNameList = main.ONOSbench.listLog( main.onosIp[ nodeId ] )
+        assert logNameList is not None
+        # FIXME: are two karaf logs enough to cover the events we want?
+        if len( logNameList ) >= 2:
+            return 2
+        return 1
+    except AssertionError:
+        main.log.error("There is no karaf log")
         return -1
-    else:
-        if func == 'max':
-            return max( compareTermList )
-        if func == 'min':
-            return min( compareTermList )
+
+def getTopologyTimestamps( main ):
+    '''
+    Get timestamps for the last topology events on all cluster nodes
+    '''
+    timestamps = []
+    for i in range( main.numCtrls ):
+        # Search for last topology event in karaf log
+        lines = main.CLIs[ i ].logSearch( mode='last', searchTerm=main.searchTerm[ "TopologyTime" ], startLine=main.startLine[ i ], logNum=getLogNum( main, i ) )
+        if lines is None or len( lines ) != 1:
+            main.log.error( "Error when trying to get topology event timestamp" )
+            return main.ERROR
+        try:
+            timestampField = lines[0].split( "creationTime=" )
+            timestamp = timestampField[ 1 ].split( "," )
+            timestamp = int( timestamp[ 0 ] )
+            timestamps.append( timestamp )
+        except ( KeyError, IndexError ):
+            main.log.error( "Error when trying to get topology event timestamp" )
+            return main.ERROR
+    return timestamps
+
+def getIntentTimestamps( main ):
+    '''
+    Get timestamps for all intent keys on all cluster nodes
+    '''
+    timestamps = {}
+    for i in range( main.numCtrls ):
+        # Search for intent INSTALLED event in karaf log
+        lines = main.CLIs[ i ].logSearch( mode='all', searchTerm=main.searchTerm[ "InstallTime" ], startLine=main.startLine[ i ], logNum=getLogNum( main, i ) )
+        if lines is None or len( lines ) == 0:
+            main.log.error( "Error when trying to get intent key or timestamp" )
+            return main.ERROR
+        for line in lines:
+            try:
+                # Get intent key
+                keyField = line.split( "key=" )
+                key = keyField[ 1 ].split( "," )
+                key = key[ 0 ]
+                if not key in timestamps.keys():
+                    timestamps[ key ] = []
+                # Get timestamp
+                timestampField = line.split( "time = " )
+                timestamp = timestampField[ 1 ].split( " " )
+                timestamp = int( timestamp[ 0 ] )
+                timestamps[ key ].append( timestamp )
+            except ( KeyError, IndexError ):
+                main.log.error( "Error when trying to get intent key or timestamp" )
+                return main.ERROR
+    return timestamps
+
+def calculateLatency( main, topologyTimestamps, intentTimestamps ):
+    '''
+    Calculate reroute latency values using timestamps
+    '''
+    topologyTimestamp = numpy.min( topologyTimestamps )
+    firstInstalledLatency = {}
+    lastInstalledLatency = {}
+    for key in intentTimestamps.keys():
+        firstInstalledTimestamp = numpy.min( intentTimestamps[ key ] )
+        lastInstalledTimestamp = numpy.max( intentTimestamps[ key ] )
+        firstInstalledLatency[ key ] = firstInstalledTimestamp - topologyTimestamp
+        lastInstalledLatency[ key ] = lastInstalledTimestamp - topologyTimestamp
+    firstLocalLatency = numpy.min( firstInstalledLatency.values() )
+    lastLocalLatency = numpy.max( firstInstalledLatency.values() )
+    firstGlobalLatency = numpy.min( lastInstalledLatency.values() )
+    lastGlobalLatency = numpy.max( lastInstalledLatency.values() )
+    main.log.info( "firstLocalLatency: {}, lastLocalLatency: {}, firstGlobalLatency: {}, lastGlobalLatency: {}".format( firstLocalLatency, lastLocalLatency, firstGlobalLatency, lastGlobalLatency ) )
+    return firstLocalLatency, lastLocalLatency, firstGlobalLatency, lastGlobalLatency
diff --git a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py
index 3304d27..a94dfe8 100644
--- a/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py
+++ b/TestON/tests/SCPF/SCPFscaleTopo/SCPFscaleTopo.py
@@ -155,13 +155,14 @@
         else:
             main.log.info( "Mininet was not running" )
 
+        main.commit = main.ONOSbench.getVersion(report=True)
+        main.commit = main.commit.split(" ")[1]
+
         main.case( "Starting up " + str( main.numCtrls ) +
                    " node(s) ONOS cluster" )
         main.caseExplanation = "Set up ONOS with " + str( main.numCtrls ) +\
                                 " node(s) ONOS cluster"
 
-
-
         #kill off all onos processes
         main.log.info( "Safety check, killing all ONOS processes" +
                        " before initiating environment setup" )