blob: c781a52c8f726b0f8b633a6d6ecf7a9f47904203 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halle1a3b752015-07-22 13:02:46 -070023
Jon Hallf37d44d2017-05-24 10:37:30 -070024
Jon Hall41d39f12016-04-11 22:54:35 -070025class HA():
Jon Hall57b50432015-10-22 10:20:10 -070026
    def __init__( self ):
        """
        Initialize the HA test helper.
        """
        self.default = ''  # unused placeholder value
Jon Hall57b50432015-10-22 10:20:10 -070029
Devin Lim58046fa2017-07-05 16:55:00 -070030 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070031 # copy gen-partions file to ONOS
32 # NOTE: this assumes TestON and ONOS are on the same machine
33 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
34 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
35 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
36 main.ONOSbench.ip_address,
37 srcFile,
38 dstDir,
39 pwd=main.ONOSbench.pwd,
40 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070041
Devin Lim58046fa2017-07-05 16:55:00 -070042 def cleanUpGenPartition( self ):
43 # clean up gen-partitions file
44 try:
45 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
46 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
47 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
48 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
49 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
50 str( main.ONOSbench.handle.before ) )
51 except ( pexpect.TIMEOUT, pexpect.EOF ):
52 main.log.exception( "ONOSbench: pexpect exception found:" +
53 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070054 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070055
Devin Lim58046fa2017-07-05 16:55:00 -070056 def startingMininet( self ):
57 main.step( "Starting Mininet" )
58 # scp topo file to mininet
59 # TODO: move to params?
60 topoName = "obelisk.py"
61 filePath = main.ONOSbench.home + "/tools/test/topos/"
62 main.ONOSbench.scp( main.Mininet1,
63 filePath + topoName,
64 main.Mininet1.home,
65 direction="to" )
66 mnResult = main.Mininet1.startNet()
67 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
68 onpass="Mininet Started",
69 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070070
Devin Lim58046fa2017-07-05 16:55:00 -070071 def scalingMetadata( self ):
72 import re
Devin Lim142b5342017-07-20 15:22:39 -070073 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070074 main.scaling = main.params[ 'scaling' ].split( "," )
75 main.log.debug( main.scaling )
76 scale = main.scaling.pop( 0 )
77 main.log.debug( scale )
78 if "e" in scale:
79 equal = True
80 else:
81 equal = False
82 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070083 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
84 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070085 utilities.assert_equals( expect=main.TRUE, actual=genResult,
86 onpass="New cluster metadata file generated",
87 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070088
Devin Lim58046fa2017-07-05 16:55:00 -070089 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070090 main.step( "Generate initial metadata file" )
91 if main.Cluster.numCtrls >= 5:
92 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070093 else:
94 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070095 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070096 utilities.assert_equals( expect=main.TRUE, actual=genResult,
97 onpass="New cluster metadata file generated",
98 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070099
Devin Lim142b5342017-07-20 15:22:39 -0700100 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700101 import os
102 main.step( "Setup server for cluster metadata file" )
103 main.serverPort = main.params[ 'server' ][ 'port' ]
104 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
105 main.log.debug( "Root dir: {}".format( rootDir ) )
106 status = main.Server.start( main.ONOSbench,
107 rootDir,
108 port=main.serverPort,
109 logDir=main.logdir + "/server.log" )
110 utilities.assert_equals( expect=main.TRUE, actual=status,
111 onpass="Server started",
112 onfail="Failled to start SimpleHTTPServer" )
113
Jon Hall4f360bc2017-09-07 10:19:52 -0700114 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700115 main.step( "Copying backup config files" )
116 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
117 cp = main.ONOSbench.scp( main.ONOSbench,
118 main.onosServicepath,
119 main.onosServicepath + ".backup",
120 direction="to" )
121
122 utilities.assert_equals( expect=main.TRUE,
123 actual=cp,
124 onpass="Copy backup config file succeeded",
125 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700126
127 def setMetadataUrl( self ):
128 # NOTE: You should probably backup the config before and reset the config after the test
Devin Lim58046fa2017-07-05 16:55:00 -0700129 # we need to modify the onos-service file to use remote metadata file
130 # url for cluster metadata file
131 iface = main.params[ 'server' ].get( 'interface' )
132 ip = main.ONOSbench.getIpAddr( iface=iface )
133 metaFile = "cluster.json"
134 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
135 main.log.warn( javaArgs )
136 main.log.warn( repr( javaArgs ) )
137 handle = main.ONOSbench.handle
Jon Hall4173b242017-09-12 17:04:38 -0700138 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
139 main.onosServicepath )
Devin Lim58046fa2017-07-05 16:55:00 -0700140 main.log.warn( sed )
141 main.log.warn( repr( sed ) )
142 handle.sendline( sed )
143 handle.expect( metaFile )
144 output = handle.before
145 handle.expect( "\$" )
146 output += handle.before
147 main.log.debug( repr( output ) )
148
149 def cleanUpOnosService( self ):
150 # Cleanup custom onos-service file
151 main.ONOSbench.scp( main.ONOSbench,
152 main.onosServicepath + ".backup",
153 main.onosServicepath,
154 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700155
Jon Halla440e872016-03-31 15:15:50 -0700156 def consistentCheck( self ):
157 """
158 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700159
Jon Hallf37d44d2017-05-24 10:37:30 -0700160 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700161 - onosCounters is the parsed json output of the counters command on
162 all nodes
163 - consistent is main.TRUE if all "TestON" counters are consitent across
164 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700165 """
Jon Halle1a3b752015-07-22 13:02:46 -0700166 try:
Jon Halla440e872016-03-31 15:15:50 -0700167 # Get onos counters results
168 onosCountersRaw = []
169 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700170 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700171 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700172 name="counters-" + str( ctrl ),
173 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700174 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700175 'randomTime': True } )
176 threads.append( t )
177 t.start()
178 for t in threads:
179 t.join()
180 onosCountersRaw.append( t.result )
181 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700182 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700183 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700184 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700185 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700186 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700187 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700188 main.log.warn( repr( onosCountersRaw[ i ] ) )
189 onosCounters.append( [] )
190
191 testCounters = {}
192 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700193 # lookes like a dict whose keys are the name of the ONOS node and
194 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700195 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700196 # }
197 # NOTE: There is an assumtion that all nodes are active
198 # based on the above for loops
199 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700200 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700201 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700202 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700203 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700204 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700205 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700206 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700207 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700208 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700209 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
210 if all( tmp ):
211 consistent = main.TRUE
212 else:
213 consistent = main.FALSE
214 main.log.error( "ONOS nodes have different values for counters:\n" +
215 testCounters )
216 return ( onosCounters, consistent )
217 except Exception:
218 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700219 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700220
221 def counterCheck( self, counterName, counterValue ):
222 """
223 Checks that TestON counters are consistent across all nodes and that
224 specified counter is in ONOS with the given value
225 """
226 try:
227 correctResults = main.TRUE
228 # Get onos counters results and consistentCheck
229 onosCounters, consistent = self.consistentCheck()
230 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700231 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700232 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700233 onosValue = None
234 try:
235 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700236 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700237 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700238 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700239 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700240 correctResults = main.FALSE
241 if onosValue == counterValue:
242 main.log.info( counterName + " counter value is correct" )
243 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700244 main.log.error( counterName +
245 " counter value is incorrect," +
246 " expected value: " + str( counterValue ) +
247 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700248 correctResults = main.FALSE
249 return consistent and correctResults
250 except Exception:
251 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700252 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
254 def consistentLeaderboards( self, nodes ):
255 TOPIC = 'org.onosproject.election'
256 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700257 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700258 for n in range( 5 ): # Retry in case election is still happening
259 leaderList = []
260 # Get all leaderboards
261 for cli in nodes:
262 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
263 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700264 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700265 leaderList is not None
266 main.log.debug( leaderList )
267 main.log.warn( result )
268 if result:
269 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700270 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700271 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
272 return ( result, leaderList )
273
Devin Lim58046fa2017-07-05 16:55:00 -0700274 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
Jeremy Ronquillo7f8fb572017-11-14 08:28:41 -0800275 # DEPRECATED: ONOSSetup.py now creates these graphs.
276
277 main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
Jon Hallca319892017-06-15 15:25:22 -0700278
    def initialSetUp( self, serviceClean=False ):
        """
        Finish test initialization after ONOS is installed and running.

        Optionally starts a Mininet packet capture and reverts ONOS
        service-file changes, then verifies the nodes, activates the apps
        listed in the params file, pushes ONOS_Configuration settings, and
        checks app ids. Exits the test if the node check fails.

        serviceClean - when True, git-checkout the onos.conf / onos.service
                       init files on the bench to undo test modifications
        """
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        # retry since nodes may still be coming up
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # dump each node's non-ACTIVE components before aborting
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700364
Jon Hallca319892017-06-15 15:25:22 -0700365 def commonChecks( self ):
366 # TODO: make this assertable or assert in here?
367 self.topicsCheck()
368 self.partitionsCheck()
369 self.pendingMapCheck()
370 self.appCheck()
371
372 def topicsCheck( self, extraTopics=[] ):
373 """
374 Check for work partition topics in leaders output
375 """
376 leaders = main.Cluster.next().leaders()
377 missing = False
378 try:
379 if leaders:
380 parsedLeaders = json.loads( leaders )
381 output = json.dumps( parsedLeaders,
382 sort_keys=True,
383 indent=4,
384 separators=( ',', ': ' ) )
385 main.log.debug( "Leaders: " + output )
386 # check for all intent partitions
387 topics = []
388 for i in range( 14 ):
389 topics.append( "work-partition-" + str( i ) )
390 topics += extraTopics
391 main.log.debug( topics )
392 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
393 for topic in topics:
394 if topic not in ONOStopics:
395 main.log.error( "Error: " + topic +
396 " not in leaders" )
397 missing = True
398 else:
399 main.log.error( "leaders() returned None" )
400 except ( ValueError, TypeError ):
401 main.log.exception( "Error parsing leaders" )
402 main.log.error( repr( leaders ) )
403 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700404 # NOTE Can we refactor this into the Cluster class?
405 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700406 for ctrl in main.Cluster.active():
407 response = ctrl.CLI.leaders( jsonFormat=False )
408 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
409 str( response ) )
410 return missing
411
412 def partitionsCheck( self ):
413 # TODO: return something assertable
414 partitions = main.Cluster.next().partitions()
415 try:
416 if partitions:
417 parsedPartitions = json.loads( partitions )
418 output = json.dumps( parsedPartitions,
419 sort_keys=True,
420 indent=4,
421 separators=( ',', ': ' ) )
422 main.log.debug( "Partitions: " + output )
423 # TODO check for a leader in all paritions
424 # TODO check for consistency among nodes
425 else:
426 main.log.error( "partitions() returned None" )
427 except ( ValueError, TypeError ):
428 main.log.exception( "Error parsing partitions" )
429 main.log.error( repr( partitions ) )
430
431 def pendingMapCheck( self ):
432 pendingMap = main.Cluster.next().pendingMap()
433 try:
434 if pendingMap:
435 parsedPending = json.loads( pendingMap )
436 output = json.dumps( parsedPending,
437 sort_keys=True,
438 indent=4,
439 separators=( ',', ': ' ) )
440 main.log.debug( "Pending map: " + output )
441 # TODO check something here?
442 else:
443 main.log.error( "pendingMap() returned None" )
444 except ( ValueError, TypeError ):
445 main.log.exception( "Error parsing pending map" )
446 main.log.error( repr( pendingMap ) )
447
448 def appCheck( self ):
449 """
450 Check App IDs on all nodes
451 """
452 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
453 appResults = main.Cluster.command( "appToIDCheck" )
454 appCheck = all( i == main.TRUE for i in appResults )
455 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700456 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700457 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
458 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
459 return appCheck
460
Jon Halle0f0b342017-04-18 11:43:47 -0700461 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
462 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700463 completedValues = main.Cluster.command( "workQueueTotalCompleted",
464 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700465 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700466 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700467 completedResult = all( completedResults )
468 if not completedResult:
469 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
470 workQueueName, completed, completedValues ) )
471
472 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700473 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
474 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700475 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700476 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700477 inProgressResult = all( inProgressResults )
478 if not inProgressResult:
479 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
480 workQueueName, inProgress, inProgressValues ) )
481
482 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700483 pendingValues = main.Cluster.command( "workQueueTotalPending",
484 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700485 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700486 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700487 pendingResult = all( pendingResults )
488 if not pendingResult:
489 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
490 workQueueName, pending, pendingValues ) )
491 return completedResult and inProgressResult and pendingResult
492
Devin Lim58046fa2017-07-05 16:55:00 -0700493 def assignDevices( self, main ):
494 """
495 Assign devices to controllers
496 """
497 import re
Devin Lim58046fa2017-07-05 16:55:00 -0700498 assert main, "main not defined"
499 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700500
501 main.case( "Assigning devices to controllers" )
502 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
503 "and check that an ONOS node becomes the " + \
504 "master of the device."
505 main.step( "Assign switches to controllers" )
506
Jon Hallca319892017-06-15 15:25:22 -0700507 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -0700508 swList = []
509 for i in range( 1, 29 ):
510 swList.append( "s" + str( i ) )
511 main.Mininet1.assignSwController( sw=swList, ip=ipList )
512
513 mastershipCheck = main.TRUE
514 for i in range( 1, 29 ):
515 response = main.Mininet1.getSwController( "s" + str( i ) )
516 try:
517 main.log.info( str( response ) )
518 except Exception:
519 main.log.info( repr( response ) )
Devin Lim142b5342017-07-20 15:22:39 -0700520 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -0700521 if re.search( "tcp:" + ctrl.ipAddress, response ):
Devin Lim58046fa2017-07-05 16:55:00 -0700522 mastershipCheck = mastershipCheck and main.TRUE
523 else:
Jon Hall4173b242017-09-12 17:04:38 -0700524 main.log.error( "Error, node " + repr( ctrl ) + " is " +
Devin Lim58046fa2017-07-05 16:55:00 -0700525 "not in the list of controllers s" +
526 str( i ) + " is connecting to." )
527 mastershipCheck = main.FALSE
528 utilities.assert_equals(
529 expect=main.TRUE,
530 actual=mastershipCheck,
531 onpass="Switch mastership assigned correctly",
532 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700533
Devin Lim58046fa2017-07-05 16:55:00 -0700534 def assignIntents( self, main ):
535 """
536 Assign intents
537 """
538 import time
539 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700540 assert main, "main not defined"
541 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700542 try:
543 main.HAlabels
544 except ( NameError, AttributeError ):
545 main.log.error( "main.HAlabels not defined, setting to []" )
546 main.HAlabels = []
547 try:
548 main.HAdata
549 except ( NameError, AttributeError ):
550 main.log.error( "data not defined, setting to []" )
551 main.HAdata = []
552 main.case( "Adding host Intents" )
553 main.caseExplanation = "Discover hosts by using pingall then " +\
554 "assign predetermined host-to-host intents." +\
555 " After installation, check that the intent" +\
556 " is distributed to all nodes and the state" +\
557 " is INSTALLED"
558
559 # install onos-app-fwd
560 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700561 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700562 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700563 utilities.assert_equals( expect=main.TRUE, actual=installResults,
564 onpass="Install fwd successful",
565 onfail="Install fwd failed" )
566
567 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700568 appCheck = self.appCheck()
569 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700570 onpass="App Ids seem to be correct",
571 onfail="Something is wrong with app Ids" )
572
573 main.step( "Discovering Hosts( Via pingall for now )" )
574 # FIXME: Once we have a host discovery mechanism, use that instead
575 # REACTIVE FWD test
576 pingResult = main.FALSE
577 passMsg = "Reactive Pingall test passed"
578 time1 = time.time()
579 pingResult = main.Mininet1.pingall()
580 time2 = time.time()
581 if not pingResult:
582 main.log.warn( "First pingall failed. Trying again..." )
583 pingResult = main.Mininet1.pingall()
584 passMsg += " on the second try"
585 utilities.assert_equals(
586 expect=main.TRUE,
587 actual=pingResult,
588 onpass=passMsg,
589 onfail="Reactive Pingall failed, " +
590 "one or more ping pairs failed" )
591 main.log.info( "Time for pingall: %2f seconds" %
592 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700593 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700594 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700595 # timeout for fwd flows
596 time.sleep( 11 )
597 # uninstall onos-app-fwd
598 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700599 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700600 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
601 onpass="Uninstall fwd successful",
602 onfail="Uninstall fwd failed" )
603
604 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700605 appCheck2 = self.appCheck()
606 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700607 onpass="App Ids seem to be correct",
608 onfail="Something is wrong with app Ids" )
609
610 main.step( "Add host intents via cli" )
611 intentIds = []
612 # TODO: move the host numbers to params
613 # Maybe look at all the paths we ping?
614 intentAddResult = True
615 hostResult = main.TRUE
616 for i in range( 8, 18 ):
617 main.log.info( "Adding host intent between h" + str( i ) +
618 " and h" + str( i + 10 ) )
619 host1 = "00:00:00:00:00:" + \
620 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
621 host2 = "00:00:00:00:00:" + \
622 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
623 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700624 host1Dict = onosCli.CLI.getHost( host1 )
625 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700626 host1Id = None
627 host2Id = None
628 if host1Dict and host2Dict:
629 host1Id = host1Dict.get( 'id', None )
630 host2Id = host2Dict.get( 'id', None )
631 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700632 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700633 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700634 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700635 if tmpId:
636 main.log.info( "Added intent with id: " + tmpId )
637 intentIds.append( tmpId )
638 else:
639 main.log.error( "addHostIntent returned: " +
640 repr( tmpId ) )
641 else:
642 main.log.error( "Error, getHost() failed for h" + str( i ) +
643 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700644 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700645 try:
Jon Hallca319892017-06-15 15:25:22 -0700646 output = json.dumps( json.loads( hosts ),
647 sort_keys=True,
648 indent=4,
649 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700650 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700651 output = repr( hosts )
652 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700653 hostResult = main.FALSE
654 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
655 onpass="Found a host id for each host",
656 onfail="Error looking up host ids" )
657
658 intentStart = time.time()
659 onosIds = onosCli.getAllIntentsId()
660 main.log.info( "Submitted intents: " + str( intentIds ) )
661 main.log.info( "Intents in ONOS: " + str( onosIds ) )
662 for intent in intentIds:
663 if intent in onosIds:
664 pass # intent submitted is in onos
665 else:
666 intentAddResult = False
667 if intentAddResult:
668 intentStop = time.time()
669 else:
670 intentStop = None
671 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700672 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700673 intentStates = []
674 installedCheck = True
675 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
676 count = 0
677 try:
678 for intent in json.loads( intents ):
679 state = intent.get( 'state', None )
680 if "INSTALLED" not in state:
681 installedCheck = False
682 intentId = intent.get( 'id', None )
683 intentStates.append( ( intentId, state ) )
684 except ( ValueError, TypeError ):
685 main.log.exception( "Error parsing intents" )
686 # add submitted intents not in the store
687 tmplist = [ i for i, s in intentStates ]
688 missingIntents = False
689 for i in intentIds:
690 if i not in tmplist:
691 intentStates.append( ( i, " - " ) )
692 missingIntents = True
693 intentStates.sort()
694 for i, s in intentStates:
695 count += 1
696 main.log.info( "%-6s%-15s%-15s" %
697 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700698 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700699
700 intentAddResult = bool( intentAddResult and not missingIntents and
701 installedCheck )
702 if not intentAddResult:
703 main.log.error( "Error in pushing host intents to ONOS" )
704
705 main.step( "Intent Anti-Entropy dispersion" )
706 for j in range( 100 ):
707 correct = True
708 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700709 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700710 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700711 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700712 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700713 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700714 str( sorted( onosIds ) ) )
715 if sorted( ids ) != sorted( intentIds ):
716 main.log.warn( "Set of intent IDs doesn't match" )
717 correct = False
718 break
719 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700720 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700721 for intent in intents:
722 if intent[ 'state' ] != "INSTALLED":
723 main.log.warn( "Intent " + intent[ 'id' ] +
724 " is " + intent[ 'state' ] )
725 correct = False
726 break
727 if correct:
728 break
729 else:
730 time.sleep( 1 )
731 if not intentStop:
732 intentStop = time.time()
733 global gossipTime
734 gossipTime = intentStop - intentStart
735 main.log.info( "It took about " + str( gossipTime ) +
736 " seconds for all intents to appear in each node" )
737 append = False
738 title = "Gossip Intents"
739 count = 1
740 while append is False:
741 curTitle = title + str( count )
742 if curTitle not in main.HAlabels:
743 main.HAlabels.append( curTitle )
744 main.HAdata.append( str( gossipTime ) )
745 append = True
746 else:
747 count += 1
748 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700749 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700750 utilities.assert_greater_equals(
751 expect=maxGossipTime, actual=gossipTime,
752 onpass="ECM anti-entropy for intents worked within " +
753 "expected time",
754 onfail="Intent ECM anti-entropy took too long. " +
755 "Expected time:{}, Actual time:{}".format( maxGossipTime,
756 gossipTime ) )
757 if gossipTime <= maxGossipTime:
758 intentAddResult = True
759
Jon Hallca319892017-06-15 15:25:22 -0700760 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700761 if not intentAddResult or "key" in pendingMap:
762 import time
763 installedCheck = True
764 main.log.info( "Sleeping 60 seconds to see if intents are found" )
765 time.sleep( 60 )
766 onosIds = onosCli.getAllIntentsId()
767 main.log.info( "Submitted intents: " + str( intentIds ) )
768 main.log.info( "Intents in ONOS: " + str( onosIds ) )
769 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700770 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700771 intentStates = []
772 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
773 count = 0
774 try:
775 for intent in json.loads( intents ):
776 # Iter through intents of a node
777 state = intent.get( 'state', None )
778 if "INSTALLED" not in state:
779 installedCheck = False
780 intentId = intent.get( 'id', None )
781 intentStates.append( ( intentId, state ) )
782 except ( ValueError, TypeError ):
783 main.log.exception( "Error parsing intents" )
784 # add submitted intents not in the store
785 tmplist = [ i for i, s in intentStates ]
786 for i in intentIds:
787 if i not in tmplist:
788 intentStates.append( ( i, " - " ) )
789 intentStates.sort()
790 for i, s in intentStates:
791 count += 1
792 main.log.info( "%-6s%-15s%-15s" %
793 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700794 self.topicsCheck( [ "org.onosproject.election" ] )
795 self.partitionsCheck()
796 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700797
Jon Hallca319892017-06-15 15:25:22 -0700798 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700799 """
800 Ping across added host intents
801 """
802 import json
803 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700804 assert main, "main not defined"
805 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700806 main.case( "Verify connectivity by sending traffic across Intents" )
807 main.caseExplanation = "Ping across added host intents to check " +\
808 "functionality and check the state of " +\
809 "the intent"
810
Jon Hallca319892017-06-15 15:25:22 -0700811 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700812 main.step( "Check Intent state" )
813 installedCheck = False
814 loopCount = 0
815 while not installedCheck and loopCount < 40:
816 installedCheck = True
817 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700818 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700819 intentStates = []
820 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
821 count = 0
822 # Iter through intents of a node
823 try:
824 for intent in json.loads( intents ):
825 state = intent.get( 'state', None )
826 if "INSTALLED" not in state:
827 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700828 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700829 intentId = intent.get( 'id', None )
830 intentStates.append( ( intentId, state ) )
831 except ( ValueError, TypeError ):
832 main.log.exception( "Error parsing intents." )
833 # Print states
834 intentStates.sort()
835 for i, s in intentStates:
836 count += 1
837 main.log.info( "%-6s%-15s%-15s" %
838 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700839 if not installedCheck:
840 time.sleep( 1 )
841 loopCount += 1
842 utilities.assert_equals( expect=True, actual=installedCheck,
843 onpass="Intents are all INSTALLED",
844 onfail="Intents are not all in " +
845 "INSTALLED state" )
846
847 main.step( "Ping across added host intents" )
848 PingResult = main.TRUE
849 for i in range( 8, 18 ):
850 ping = main.Mininet1.pingHost( src="h" + str( i ),
851 target="h" + str( i + 10 ) )
852 PingResult = PingResult and ping
853 if ping == main.FALSE:
854 main.log.warn( "Ping failed between h" + str( i ) +
855 " and h" + str( i + 10 ) )
856 elif ping == main.TRUE:
857 main.log.info( "Ping test passed!" )
858 # Don't set PingResult or you'd override failures
859 if PingResult == main.FALSE:
860 main.log.error(
861 "Intents have not been installed correctly, pings failed." )
862 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700863 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700864 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700865 output = json.dumps( json.loads( tmpIntents ),
866 sort_keys=True,
867 indent=4,
868 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700869 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700870 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700871 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700872 utilities.assert_equals(
873 expect=main.TRUE,
874 actual=PingResult,
875 onpass="Intents have been installed correctly and pings work",
876 onfail="Intents have not been installed correctly, pings failed." )
877
878 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700879 topicsCheck = self.topicsCheck()
880 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700881 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700882 onfail="Some topics were lost" )
883 self.partitionsCheck()
884 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700885
886 if not installedCheck:
887 main.log.info( "Waiting 60 seconds to see if the state of " +
888 "intents change" )
889 time.sleep( 60 )
890 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700891 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700892 intentStates = []
893 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
894 count = 0
895 # Iter through intents of a node
896 try:
897 for intent in json.loads( intents ):
898 state = intent.get( 'state', None )
899 if "INSTALLED" not in state:
900 installedCheck = False
901 intentId = intent.get( 'id', None )
902 intentStates.append( ( intentId, state ) )
903 except ( ValueError, TypeError ):
904 main.log.exception( "Error parsing intents." )
905 intentStates.sort()
906 for i, s in intentStates:
907 count += 1
908 main.log.info( "%-6s%-15s%-15s" %
909 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700910 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700911
Devin Lim58046fa2017-07-05 16:55:00 -0700912 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700913 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700914 main.step( "Wait a minute then ping again" )
915 # the wait is above
916 PingResult = main.TRUE
917 for i in range( 8, 18 ):
918 ping = main.Mininet1.pingHost( src="h" + str( i ),
919 target="h" + str( i + 10 ) )
920 PingResult = PingResult and ping
921 if ping == main.FALSE:
922 main.log.warn( "Ping failed between h" + str( i ) +
923 " and h" + str( i + 10 ) )
924 elif ping == main.TRUE:
925 main.log.info( "Ping test passed!" )
926 # Don't set PingResult or you'd override failures
927 if PingResult == main.FALSE:
928 main.log.error(
929 "Intents have not been installed correctly, pings failed." )
930 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700931 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700932 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700933 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700934 main.log.warn( json.dumps( json.loads( tmpIntents ),
935 sort_keys=True,
936 indent=4,
937 separators=( ',', ': ' ) ) )
938 except ( ValueError, TypeError ):
939 main.log.warn( repr( tmpIntents ) )
940 utilities.assert_equals(
941 expect=main.TRUE,
942 actual=PingResult,
943 onpass="Intents have been installed correctly and pings work",
944 onfail="Intents have not been installed correctly, pings failed." )
945
Devin Lim142b5342017-07-20 15:22:39 -0700946 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700947 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700948 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700949 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700950 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700951 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700952 actual=rolesNotNull,
953 onpass="Each device has a master",
954 onfail="Some devices don't have a master assigned" )
955
Devin Lim142b5342017-07-20 15:22:39 -0700956 def checkTheRole( self ):
957 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700958 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700959 consistentMastership = True
960 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700961 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700962 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700963 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700964 main.log.error( "Error in getting " + node + " roles" )
965 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700966 repr( ONOSMastership[ i ] ) )
967 rolesResults = False
968 utilities.assert_equals(
969 expect=True,
970 actual=rolesResults,
971 onpass="No error in reading roles output",
972 onfail="Error in reading roles from ONOS" )
973
974 main.step( "Check for consistency in roles from each controller" )
975 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
976 main.log.info(
977 "Switch roles are consistent across all ONOS nodes" )
978 else:
979 consistentMastership = False
980 utilities.assert_equals(
981 expect=True,
982 actual=consistentMastership,
983 onpass="Switch roles are consistent across all ONOS nodes",
984 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700985 return ONOSMastership, rolesResults, consistentMastership
986
987 def checkingIntents( self ):
988 main.step( "Get the intents from each controller" )
989 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
990 intentsResults = True
991 for i in range( len( ONOSIntents ) ):
992 node = str( main.Cluster.active( i ) )
993 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
994 main.log.error( "Error in getting " + node + " intents" )
995 main.log.warn( node + " intents response: " +
996 repr( ONOSIntents[ i ] ) )
997 intentsResults = False
998 utilities.assert_equals(
999 expect=True,
1000 actual=intentsResults,
1001 onpass="No error in reading intents output",
1002 onfail="Error in reading intents from ONOS" )
1003 return ONOSIntents, intentsResults
1004
1005 def readingState( self, main ):
1006 """
1007 Reading state of ONOS
1008 """
1009 import json
1010 import time
1011 assert main, "main not defined"
1012 assert utilities.assert_equals, "utilities.assert_equals not defined"
1013 try:
1014 from tests.dependencies.topology import Topology
1015 except ImportError:
1016 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001017 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001018 try:
1019 main.topoRelated
1020 except ( NameError, AttributeError ):
1021 main.topoRelated = Topology()
1022 main.case( "Setting up and gathering data for current state" )
1023 # The general idea for this test case is to pull the state of
1024 # ( intents,flows, topology,... ) from each ONOS node
1025 # We can then compare them with each other and also with past states
1026
1027 global mastershipState
1028 mastershipState = '[]'
1029
1030 self.checkRoleNotNull()
1031
1032 main.step( "Get the Mastership of each switch from each controller" )
1033 mastershipCheck = main.FALSE
1034
1035 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001036
1037 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001038 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001039 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001040 try:
1041 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001042 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001043 json.dumps(
1044 json.loads( ONOSMastership[ i ] ),
1045 sort_keys=True,
1046 indent=4,
1047 separators=( ',', ': ' ) ) )
1048 except ( ValueError, TypeError ):
1049 main.log.warn( repr( ONOSMastership[ i ] ) )
1050 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001051 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001052 mastershipState = ONOSMastership[ 0 ]
1053
Devin Lim58046fa2017-07-05 16:55:00 -07001054 global intentState
1055 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001056 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001057 intentCheck = main.FALSE
1058 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001059
Devin Lim58046fa2017-07-05 16:55:00 -07001060 main.step( "Check for consistency in Intents from each controller" )
1061 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1062 main.log.info( "Intents are consistent across all ONOS " +
1063 "nodes" )
1064 else:
1065 consistentIntents = False
1066 main.log.error( "Intents not consistent" )
1067 utilities.assert_equals(
1068 expect=True,
1069 actual=consistentIntents,
1070 onpass="Intents are consistent across all ONOS nodes",
1071 onfail="ONOS nodes have different views of intents" )
1072
1073 if intentsResults:
1074 # Try to make it easy to figure out what is happening
1075 #
1076 # Intent ONOS1 ONOS2 ...
1077 # 0x01 INSTALLED INSTALLING
1078 # ... ... ...
1079 # ... ... ...
1080 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001081 for ctrl in main.Cluster.active():
1082 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001083 main.log.warn( title )
1084 # get all intent keys in the cluster
1085 keys = []
1086 try:
1087 # Get the set of all intent keys
1088 for nodeStr in ONOSIntents:
1089 node = json.loads( nodeStr )
1090 for intent in node:
1091 keys.append( intent.get( 'id' ) )
1092 keys = set( keys )
1093 # For each intent key, print the state on each node
1094 for key in keys:
1095 row = "%-13s" % key
1096 for nodeStr in ONOSIntents:
1097 node = json.loads( nodeStr )
1098 for intent in node:
1099 if intent.get( 'id', "Error" ) == key:
1100 row += "%-15s" % intent.get( 'state' )
1101 main.log.warn( row )
1102 # End of intent state table
1103 except ValueError as e:
1104 main.log.exception( e )
1105 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1106
1107 if intentsResults and not consistentIntents:
1108 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001109 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001110 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1111 sort_keys=True,
1112 indent=4,
1113 separators=( ',', ': ' ) ) )
1114 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001115 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001116 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001117 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001118 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1119 sort_keys=True,
1120 indent=4,
1121 separators=( ',', ': ' ) ) )
1122 else:
Jon Hallca319892017-06-15 15:25:22 -07001123 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001124 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001125 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001126 intentState = ONOSIntents[ 0 ]
1127
1128 main.step( "Get the flows from each controller" )
1129 global flowState
1130 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001131 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001132 ONOSFlowsJson = []
1133 flowCheck = main.FALSE
1134 consistentFlows = True
1135 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001136 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001137 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001138 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001139 main.log.error( "Error in getting " + node + " flows" )
1140 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001141 repr( ONOSFlows[ i ] ) )
1142 flowsResults = False
1143 ONOSFlowsJson.append( None )
1144 else:
1145 try:
1146 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1147 except ( ValueError, TypeError ):
1148 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001149 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001150 " response as json." )
1151 main.log.error( repr( ONOSFlows[ i ] ) )
1152 ONOSFlowsJson.append( None )
1153 flowsResults = False
1154 utilities.assert_equals(
1155 expect=True,
1156 actual=flowsResults,
1157 onpass="No error in reading flows output",
1158 onfail="Error in reading flows from ONOS" )
1159
1160 main.step( "Check for consistency in Flows from each controller" )
1161 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1162 if all( tmp ):
1163 main.log.info( "Flow count is consistent across all ONOS nodes" )
1164 else:
1165 consistentFlows = False
1166 utilities.assert_equals(
1167 expect=True,
1168 actual=consistentFlows,
1169 onpass="The flow count is consistent across all ONOS nodes",
1170 onfail="ONOS nodes have different flow counts" )
1171
1172 if flowsResults and not consistentFlows:
1173 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001174 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001175 try:
1176 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001177 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001178 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1179 indent=4, separators=( ',', ': ' ) ) )
1180 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001181 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001182 repr( ONOSFlows[ i ] ) )
1183 elif flowsResults and consistentFlows:
1184 flowCheck = main.TRUE
1185 flowState = ONOSFlows[ 0 ]
1186
1187 main.step( "Get the OF Table entries" )
1188 global flows
1189 flows = []
1190 for i in range( 1, 29 ):
1191 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1192 if flowCheck == main.FALSE:
1193 for table in flows:
1194 main.log.warn( table )
1195 # TODO: Compare switch flow tables with ONOS flow tables
1196
1197 main.step( "Start continuous pings" )
1198 main.Mininet2.pingLong(
1199 src=main.params[ 'PING' ][ 'source1' ],
1200 target=main.params[ 'PING' ][ 'target1' ],
1201 pingTime=500 )
1202 main.Mininet2.pingLong(
1203 src=main.params[ 'PING' ][ 'source2' ],
1204 target=main.params[ 'PING' ][ 'target2' ],
1205 pingTime=500 )
1206 main.Mininet2.pingLong(
1207 src=main.params[ 'PING' ][ 'source3' ],
1208 target=main.params[ 'PING' ][ 'target3' ],
1209 pingTime=500 )
1210 main.Mininet2.pingLong(
1211 src=main.params[ 'PING' ][ 'source4' ],
1212 target=main.params[ 'PING' ][ 'target4' ],
1213 pingTime=500 )
1214 main.Mininet2.pingLong(
1215 src=main.params[ 'PING' ][ 'source5' ],
1216 target=main.params[ 'PING' ][ 'target5' ],
1217 pingTime=500 )
1218 main.Mininet2.pingLong(
1219 src=main.params[ 'PING' ][ 'source6' ],
1220 target=main.params[ 'PING' ][ 'target6' ],
1221 pingTime=500 )
1222 main.Mininet2.pingLong(
1223 src=main.params[ 'PING' ][ 'source7' ],
1224 target=main.params[ 'PING' ][ 'target7' ],
1225 pingTime=500 )
1226 main.Mininet2.pingLong(
1227 src=main.params[ 'PING' ][ 'source8' ],
1228 target=main.params[ 'PING' ][ 'target8' ],
1229 pingTime=500 )
1230 main.Mininet2.pingLong(
1231 src=main.params[ 'PING' ][ 'source9' ],
1232 target=main.params[ 'PING' ][ 'target9' ],
1233 pingTime=500 )
1234 main.Mininet2.pingLong(
1235 src=main.params[ 'PING' ][ 'source10' ],
1236 target=main.params[ 'PING' ][ 'target10' ],
1237 pingTime=500 )
1238
1239 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001240 devices = main.topoRelated.getAll( "devices" )
1241 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1242 ports = main.topoRelated.getAll( "ports" )
1243 links = main.topoRelated.getAll( "links" )
1244 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001245 # Compare json objects for hosts and dataplane clusters
1246
1247 # hosts
1248 main.step( "Host view is consistent across ONOS nodes" )
1249 consistentHostsResult = main.TRUE
1250 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001251 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001252 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1253 if hosts[ controller ] == hosts[ 0 ]:
1254 continue
1255 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001256 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001257 controllerStr +
1258 " is inconsistent with ONOS1" )
1259 main.log.warn( repr( hosts[ controller ] ) )
1260 consistentHostsResult = main.FALSE
1261
1262 else:
Jon Hallca319892017-06-15 15:25:22 -07001263 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001264 controllerStr )
1265 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001266 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001267 " hosts response: " +
1268 repr( hosts[ controller ] ) )
1269 utilities.assert_equals(
1270 expect=main.TRUE,
1271 actual=consistentHostsResult,
1272 onpass="Hosts view is consistent across all ONOS nodes",
1273 onfail="ONOS nodes have different views of hosts" )
1274
1275 main.step( "Each host has an IP address" )
1276 ipResult = main.TRUE
1277 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001278 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001279 if hosts[ controller ]:
1280 for host in hosts[ controller ]:
1281 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001282 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001283 controllerStr + ": " + str( host ) )
1284 ipResult = main.FALSE
1285 utilities.assert_equals(
1286 expect=main.TRUE,
1287 actual=ipResult,
1288 onpass="The ips of the hosts aren't empty",
1289 onfail="The ip of at least one host is missing" )
1290
1291 # Strongly connected clusters of devices
1292 main.step( "Cluster view is consistent across ONOS nodes" )
1293 consistentClustersResult = main.TRUE
1294 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001295 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001296 if "Error" not in clusters[ controller ]:
1297 if clusters[ controller ] == clusters[ 0 ]:
1298 continue
1299 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001300 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001301 " is inconsistent with ONOS1" )
1302 consistentClustersResult = main.FALSE
1303
1304 else:
1305 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001306 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001307 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001308 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001309 " clusters response: " +
1310 repr( clusters[ controller ] ) )
1311 utilities.assert_equals(
1312 expect=main.TRUE,
1313 actual=consistentClustersResult,
1314 onpass="Clusters view is consistent across all ONOS nodes",
1315 onfail="ONOS nodes have different views of clusters" )
1316 if not consistentClustersResult:
1317 main.log.debug( clusters )
1318
1319 # there should always only be one cluster
1320 main.step( "Cluster view correct across ONOS nodes" )
1321 try:
1322 numClusters = len( json.loads( clusters[ 0 ] ) )
1323 except ( ValueError, TypeError ):
1324 main.log.exception( "Error parsing clusters[0]: " +
1325 repr( clusters[ 0 ] ) )
1326 numClusters = "ERROR"
1327 utilities.assert_equals(
1328 expect=1,
1329 actual=numClusters,
1330 onpass="ONOS shows 1 SCC",
1331 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1332
1333 main.step( "Comparing ONOS topology to MN" )
1334 devicesResults = main.TRUE
1335 linksResults = main.TRUE
1336 hostsResults = main.TRUE
1337 mnSwitches = main.Mininet1.getSwitches()
1338 mnLinks = main.Mininet1.getLinks()
1339 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001340 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001341 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001342 currentDevicesResult = main.topoRelated.compareDevicePort(
1343 main.Mininet1, controller,
1344 mnSwitches, devices, ports )
1345 utilities.assert_equals( expect=main.TRUE,
1346 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001347 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001348 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001349 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001350 " Switches view is incorrect" )
1351
1352 currentLinksResult = main.topoRelated.compareBase( links, controller,
1353 main.Mininet1.compareLinks,
1354 [ mnSwitches, mnLinks ] )
1355 utilities.assert_equals( expect=main.TRUE,
1356 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001357 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001358 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001359 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001360 " links view is incorrect" )
1361
1362 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1363 currentHostsResult = main.Mininet1.compareHosts(
1364 mnHosts,
1365 hosts[ controller ] )
1366 else:
1367 currentHostsResult = main.FALSE
1368 utilities.assert_equals( expect=main.TRUE,
1369 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001370 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001371 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001372 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001373 " hosts don't match Mininet" )
1374
1375 devicesResults = devicesResults and currentDevicesResult
1376 linksResults = linksResults and currentLinksResult
1377 hostsResults = hostsResults and currentHostsResult
1378
1379 main.step( "Device information is correct" )
1380 utilities.assert_equals(
1381 expect=main.TRUE,
1382 actual=devicesResults,
1383 onpass="Device information is correct",
1384 onfail="Device information is incorrect" )
1385
1386 main.step( "Links are correct" )
1387 utilities.assert_equals(
1388 expect=main.TRUE,
1389 actual=linksResults,
1390 onpass="Link are correct",
1391 onfail="Links are incorrect" )
1392
1393 main.step( "Hosts are correct" )
1394 utilities.assert_equals(
1395 expect=main.TRUE,
1396 actual=hostsResults,
1397 onpass="Hosts are correct",
1398 onfail="Hosts are incorrect" )
1399
1400 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001401 """
1402 Check for basic functionality with distributed primitives
1403 """
Jon Halle0f0b342017-04-18 11:43:47 -07001404 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001405 try:
1406 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001407 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001408 assert main.pCounterName, "main.pCounterName not defined"
1409 assert main.onosSetName, "main.onosSetName not defined"
1410 # NOTE: assert fails if value is 0/None/Empty/False
1411 try:
1412 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001413 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001414 main.log.error( "main.pCounterValue not defined, setting to 0" )
1415 main.pCounterValue = 0
1416 try:
1417 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001418 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001419 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001420 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001421 # Variables for the distributed primitives tests. These are local only
1422 addValue = "a"
1423 addAllValue = "a b c d e f"
1424 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001425 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001426 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001427 workQueueName = "TestON-Queue"
1428 workQueueCompleted = 0
1429 workQueueInProgress = 0
1430 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001431
1432 description = "Check for basic functionality with distributed " +\
1433 "primitives"
1434 main.case( description )
1435 main.caseExplanation = "Test the methods of the distributed " +\
1436 "primitives (counters and sets) throught the cli"
1437 # DISTRIBUTED ATOMIC COUNTERS
1438 # Partitioned counters
1439 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001440 pCounters = main.Cluster.command( "counterTestAddAndGet",
1441 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001442 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001443 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001444 main.pCounterValue += 1
1445 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001446 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001447 pCounterResults = True
1448 for i in addedPValues:
1449 tmpResult = i in pCounters
1450 pCounterResults = pCounterResults and tmpResult
1451 if not tmpResult:
1452 main.log.error( str( i ) + " is not in partitioned "
1453 "counter incremented results" )
1454 utilities.assert_equals( expect=True,
1455 actual=pCounterResults,
1456 onpass="Default counter incremented",
1457 onfail="Error incrementing default" +
1458 " counter" )
1459
1460 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001461 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1462 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001463 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001464 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001465 addedPValues.append( main.pCounterValue )
1466 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001467 # Check that counter incremented numController times
1468 pCounterResults = True
1469 for i in addedPValues:
1470 tmpResult = i in pCounters
1471 pCounterResults = pCounterResults and tmpResult
1472 if not tmpResult:
1473 main.log.error( str( i ) + " is not in partitioned "
1474 "counter incremented results" )
1475 utilities.assert_equals( expect=True,
1476 actual=pCounterResults,
1477 onpass="Default counter incremented",
1478 onfail="Error incrementing default" +
1479 " counter" )
1480
1481 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001482 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001483 utilities.assert_equals( expect=main.TRUE,
1484 actual=incrementCheck,
1485 onpass="Added counters are correct",
1486 onfail="Added counters are incorrect" )
1487
1488 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001489 pCounters = main.Cluster.command( "counterTestAddAndGet",
1490 args=[ main.pCounterName ],
1491 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001492 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001493 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001494 main.pCounterValue += -8
1495 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001496 # Check that counter incremented numController times
1497 pCounterResults = True
1498 for i in addedPValues:
1499 tmpResult = i in pCounters
1500 pCounterResults = pCounterResults and tmpResult
1501 if not tmpResult:
1502 main.log.error( str( i ) + " is not in partitioned "
1503 "counter incremented results" )
1504 utilities.assert_equals( expect=True,
1505 actual=pCounterResults,
1506 onpass="Default counter incremented",
1507 onfail="Error incrementing default" +
1508 " counter" )
1509
1510 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001511 pCounters = main.Cluster.command( "counterTestAddAndGet",
1512 args=[ main.pCounterName ],
1513 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001514 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001515 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001516 main.pCounterValue += 5
1517 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001518
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001519 # Check that counter incremented numController times
1520 pCounterResults = True
1521 for i in addedPValues:
1522 tmpResult = i in pCounters
1523 pCounterResults = pCounterResults and tmpResult
1524 if not tmpResult:
1525 main.log.error( str( i ) + " is not in partitioned "
1526 "counter incremented results" )
1527 utilities.assert_equals( expect=True,
1528 actual=pCounterResults,
1529 onpass="Default counter incremented",
1530 onfail="Error incrementing default" +
1531 " counter" )
1532
1533 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001534 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1535 args=[ main.pCounterName ],
1536 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001537 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001538 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001539 addedPValues.append( main.pCounterValue )
1540 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001541 # Check that counter incremented numController times
1542 pCounterResults = True
1543 for i in addedPValues:
1544 tmpResult = i in pCounters
1545 pCounterResults = pCounterResults and tmpResult
1546 if not tmpResult:
1547 main.log.error( str( i ) + " is not in partitioned "
1548 "counter incremented results" )
1549 utilities.assert_equals( expect=True,
1550 actual=pCounterResults,
1551 onpass="Default counter incremented",
1552 onfail="Error incrementing default" +
1553 " counter" )
1554
1555 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001556 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001557 utilities.assert_equals( expect=main.TRUE,
1558 actual=incrementCheck,
1559 onpass="Added counters are correct",
1560 onfail="Added counters are incorrect" )
1561
1562 # DISTRIBUTED SETS
1563 main.step( "Distributed Set get" )
1564 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001565 getResponses = main.Cluster.command( "setTestGet",
1566 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001567 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001568 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001569 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001570 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001571 current = set( getResponses[ i ] )
1572 if len( current ) == len( getResponses[ i ] ):
1573 # no repeats
1574 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001575 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001576 " has incorrect view" +
1577 " of set " + main.onosSetName + ":\n" +
1578 str( getResponses[ i ] ) )
1579 main.log.debug( "Expected: " + str( main.onosSet ) )
1580 main.log.debug( "Actual: " + str( current ) )
1581 getResults = main.FALSE
1582 else:
1583 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001584 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001585 " has repeat elements in" +
1586 " set " + main.onosSetName + ":\n" +
1587 str( getResponses[ i ] ) )
1588 getResults = main.FALSE
1589 elif getResponses[ i ] == main.ERROR:
1590 getResults = main.FALSE
1591 utilities.assert_equals( expect=main.TRUE,
1592 actual=getResults,
1593 onpass="Set elements are correct",
1594 onfail="Set elements are incorrect" )
1595
1596 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001597 sizeResponses = main.Cluster.command( "setTestSize",
1598 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001599 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001600 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001601 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001602 if size != sizeResponses[ i ]:
1603 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001604 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001605 " expected a size of " + str( size ) +
1606 " for set " + main.onosSetName +
1607 " but got " + str( sizeResponses[ i ] ) )
1608 utilities.assert_equals( expect=main.TRUE,
1609 actual=sizeResults,
1610 onpass="Set sizes are correct",
1611 onfail="Set sizes are incorrect" )
1612
1613 main.step( "Distributed Set add()" )
1614 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001615 addResponses = main.Cluster.command( "setTestAdd",
1616 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001617 # main.TRUE = successfully changed the set
1618 # main.FALSE = action resulted in no change in set
1619 # main.ERROR - Some error in executing the function
1620 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001621 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001622 if addResponses[ i ] == main.TRUE:
1623 # All is well
1624 pass
1625 elif addResponses[ i ] == main.FALSE:
1626 # Already in set, probably fine
1627 pass
1628 elif addResponses[ i ] == main.ERROR:
1629 # Error in execution
1630 addResults = main.FALSE
1631 else:
1632 # unexpected result
1633 addResults = main.FALSE
1634 if addResults != main.TRUE:
1635 main.log.error( "Error executing set add" )
1636
1637 # Check if set is still correct
1638 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001639 getResponses = main.Cluster.command( "setTestGet",
1640 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001641 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001642 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001643 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001644 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001645 current = set( getResponses[ i ] )
1646 if len( current ) == len( getResponses[ i ] ):
1647 # no repeats
1648 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001649 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001650 " of set " + main.onosSetName + ":\n" +
1651 str( getResponses[ i ] ) )
1652 main.log.debug( "Expected: " + str( main.onosSet ) )
1653 main.log.debug( "Actual: " + str( current ) )
1654 getResults = main.FALSE
1655 else:
1656 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001657 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001658 " set " + main.onosSetName + ":\n" +
1659 str( getResponses[ i ] ) )
1660 getResults = main.FALSE
1661 elif getResponses[ i ] == main.ERROR:
1662 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001663 sizeResponses = main.Cluster.command( "setTestSize",
1664 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001665 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001666 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001667 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001668 if size != sizeResponses[ i ]:
1669 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001670 main.log.error( node + " expected a size of " +
1671 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001672 " but got " + str( sizeResponses[ i ] ) )
1673 addResults = addResults and getResults and sizeResults
1674 utilities.assert_equals( expect=main.TRUE,
1675 actual=addResults,
1676 onpass="Set add correct",
1677 onfail="Set add was incorrect" )
1678
1679 main.step( "Distributed Set addAll()" )
1680 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001681 addResponses = main.Cluster.command( "setTestAdd",
1682 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001683 # main.TRUE = successfully changed the set
1684 # main.FALSE = action resulted in no change in set
1685 # main.ERROR - Some error in executing the function
1686 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001687 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001688 if addResponses[ i ] == main.TRUE:
1689 # All is well
1690 pass
1691 elif addResponses[ i ] == main.FALSE:
1692 # Already in set, probably fine
1693 pass
1694 elif addResponses[ i ] == main.ERROR:
1695 # Error in execution
1696 addAllResults = main.FALSE
1697 else:
1698 # unexpected result
1699 addAllResults = main.FALSE
1700 if addAllResults != main.TRUE:
1701 main.log.error( "Error executing set addAll" )
1702
1703 # Check if set is still correct
1704 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001705 getResponses = main.Cluster.command( "setTestGet",
1706 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001707 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001708 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001709 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001710 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001711 current = set( getResponses[ i ] )
1712 if len( current ) == len( getResponses[ i ] ):
1713 # no repeats
1714 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001715 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001716 " of set " + main.onosSetName + ":\n" +
1717 str( getResponses[ i ] ) )
1718 main.log.debug( "Expected: " + str( main.onosSet ) )
1719 main.log.debug( "Actual: " + str( current ) )
1720 getResults = main.FALSE
1721 else:
1722 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001723 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001724 " set " + main.onosSetName + ":\n" +
1725 str( getResponses[ i ] ) )
1726 getResults = main.FALSE
1727 elif getResponses[ i ] == main.ERROR:
1728 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001729 sizeResponses = main.Cluster.command( "setTestSize",
1730 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001731 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001732 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001733 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001734 if size != sizeResponses[ i ]:
1735 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001736 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001737 " for set " + main.onosSetName +
1738 " but got " + str( sizeResponses[ i ] ) )
1739 addAllResults = addAllResults and getResults and sizeResults
1740 utilities.assert_equals( expect=main.TRUE,
1741 actual=addAllResults,
1742 onpass="Set addAll correct",
1743 onfail="Set addAll was incorrect" )
1744
1745 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001746 containsResponses = main.Cluster.command( "setTestGet",
1747 args=[ main.onosSetName ],
1748 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001749 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001750 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001751 if containsResponses[ i ] == main.ERROR:
1752 containsResults = main.FALSE
1753 else:
1754 containsResults = containsResults and\
1755 containsResponses[ i ][ 1 ]
1756 utilities.assert_equals( expect=main.TRUE,
1757 actual=containsResults,
1758 onpass="Set contains is functional",
1759 onfail="Set contains failed" )
1760
1761 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001762 containsAllResponses = main.Cluster.command( "setTestGet",
1763 args=[ main.onosSetName ],
1764 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001765 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001766 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001767 if containsResponses[ i ] == main.ERROR:
1768 containsResults = main.FALSE
1769 else:
1770 containsResults = containsResults and\
1771 containsResponses[ i ][ 1 ]
1772 utilities.assert_equals( expect=main.TRUE,
1773 actual=containsAllResults,
1774 onpass="Set containsAll is functional",
1775 onfail="Set containsAll failed" )
1776
1777 main.step( "Distributed Set remove()" )
1778 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001779 removeResponses = main.Cluster.command( "setTestRemove",
1780 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001781 # main.TRUE = successfully changed the set
1782 # main.FALSE = action resulted in no change in set
1783 # main.ERROR - Some error in executing the function
1784 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001785 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001786 if removeResponses[ i ] == main.TRUE:
1787 # All is well
1788 pass
1789 elif removeResponses[ i ] == main.FALSE:
1790 # not in set, probably fine
1791 pass
1792 elif removeResponses[ i ] == main.ERROR:
1793 # Error in execution
1794 removeResults = main.FALSE
1795 else:
1796 # unexpected result
1797 removeResults = main.FALSE
1798 if removeResults != main.TRUE:
1799 main.log.error( "Error executing set remove" )
1800
1801 # Check if set is still correct
1802 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001803 getResponses = main.Cluster.command( "setTestGet",
1804 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001805 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001806 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001807 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001808 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001809 current = set( getResponses[ i ] )
1810 if len( current ) == len( getResponses[ i ] ):
1811 # no repeats
1812 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001813 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001814 " of set " + main.onosSetName + ":\n" +
1815 str( getResponses[ i ] ) )
1816 main.log.debug( "Expected: " + str( main.onosSet ) )
1817 main.log.debug( "Actual: " + str( current ) )
1818 getResults = main.FALSE
1819 else:
1820 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001821 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001822 " set " + main.onosSetName + ":\n" +
1823 str( getResponses[ i ] ) )
1824 getResults = main.FALSE
1825 elif getResponses[ i ] == main.ERROR:
1826 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001827 sizeResponses = main.Cluster.command( "setTestSize",
1828 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001829 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001830 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001831 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001832 if size != sizeResponses[ i ]:
1833 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001834 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001835 " for set " + main.onosSetName +
1836 " but got " + str( sizeResponses[ i ] ) )
1837 removeResults = removeResults and getResults and sizeResults
1838 utilities.assert_equals( expect=main.TRUE,
1839 actual=removeResults,
1840 onpass="Set remove correct",
1841 onfail="Set remove was incorrect" )
1842
1843 main.step( "Distributed Set removeAll()" )
1844 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001845 removeAllResponses = main.Cluster.command( "setTestRemove",
1846 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001847 # main.TRUE = successfully changed the set
1848 # main.FALSE = action resulted in no change in set
1849 # main.ERROR - Some error in executing the function
1850 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001851 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001852 if removeAllResponses[ i ] == main.TRUE:
1853 # All is well
1854 pass
1855 elif removeAllResponses[ i ] == main.FALSE:
1856 # not in set, probably fine
1857 pass
1858 elif removeAllResponses[ i ] == main.ERROR:
1859 # Error in execution
1860 removeAllResults = main.FALSE
1861 else:
1862 # unexpected result
1863 removeAllResults = main.FALSE
1864 if removeAllResults != main.TRUE:
1865 main.log.error( "Error executing set removeAll" )
1866
1867 # Check if set is still correct
1868 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001869 getResponses = main.Cluster.command( "setTestGet",
1870 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001871 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001872 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001873 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001874 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001875 current = set( getResponses[ i ] )
1876 if len( current ) == len( getResponses[ i ] ):
1877 # no repeats
1878 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001879 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001880 " of set " + main.onosSetName + ":\n" +
1881 str( getResponses[ i ] ) )
1882 main.log.debug( "Expected: " + str( main.onosSet ) )
1883 main.log.debug( "Actual: " + str( current ) )
1884 getResults = main.FALSE
1885 else:
1886 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001887 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001888 " set " + main.onosSetName + ":\n" +
1889 str( getResponses[ i ] ) )
1890 getResults = main.FALSE
1891 elif getResponses[ i ] == main.ERROR:
1892 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001893 sizeResponses = main.Cluster.command( "setTestSize",
1894 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001895 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001896 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001897 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001898 if size != sizeResponses[ i ]:
1899 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001900 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001901 " for set " + main.onosSetName +
1902 " but got " + str( sizeResponses[ i ] ) )
1903 removeAllResults = removeAllResults and getResults and sizeResults
1904 utilities.assert_equals( expect=main.TRUE,
1905 actual=removeAllResults,
1906 onpass="Set removeAll correct",
1907 onfail="Set removeAll was incorrect" )
1908
1909 main.step( "Distributed Set addAll()" )
1910 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001911 addResponses = main.Cluster.command( "setTestAdd",
1912 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001913 # main.TRUE = successfully changed the set
1914 # main.FALSE = action resulted in no change in set
1915 # main.ERROR - Some error in executing the function
1916 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001917 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001918 if addResponses[ i ] == main.TRUE:
1919 # All is well
1920 pass
1921 elif addResponses[ i ] == main.FALSE:
1922 # Already in set, probably fine
1923 pass
1924 elif addResponses[ i ] == main.ERROR:
1925 # Error in execution
1926 addAllResults = main.FALSE
1927 else:
1928 # unexpected result
1929 addAllResults = main.FALSE
1930 if addAllResults != main.TRUE:
1931 main.log.error( "Error executing set addAll" )
1932
1933 # Check if set is still correct
1934 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001935 getResponses = main.Cluster.command( "setTestGet",
1936 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001937 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001938 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001939 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001940 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001941 current = set( getResponses[ i ] )
1942 if len( current ) == len( getResponses[ i ] ):
1943 # no repeats
1944 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001945 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001946 " of set " + main.onosSetName + ":\n" +
1947 str( getResponses[ i ] ) )
1948 main.log.debug( "Expected: " + str( main.onosSet ) )
1949 main.log.debug( "Actual: " + str( current ) )
1950 getResults = main.FALSE
1951 else:
1952 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001953 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001954 " set " + main.onosSetName + ":\n" +
1955 str( getResponses[ i ] ) )
1956 getResults = main.FALSE
1957 elif getResponses[ i ] == main.ERROR:
1958 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001959 sizeResponses = main.Cluster.command( "setTestSize",
1960 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001961 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001962 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001963 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001964 if size != sizeResponses[ i ]:
1965 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001966 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001967 " for set " + main.onosSetName +
1968 " but got " + str( sizeResponses[ i ] ) )
1969 addAllResults = addAllResults and getResults and sizeResults
1970 utilities.assert_equals( expect=main.TRUE,
1971 actual=addAllResults,
1972 onpass="Set addAll correct",
1973 onfail="Set addAll was incorrect" )
1974
1975 main.step( "Distributed Set clear()" )
1976 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001977 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001978 args=[ main.onosSetName, " " ], # Values doesn't matter
1979 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001980 # main.TRUE = successfully changed the set
1981 # main.FALSE = action resulted in no change in set
1982 # main.ERROR - Some error in executing the function
1983 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001984 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001985 if clearResponses[ i ] == main.TRUE:
1986 # All is well
1987 pass
1988 elif clearResponses[ i ] == main.FALSE:
1989 # Nothing set, probably fine
1990 pass
1991 elif clearResponses[ i ] == main.ERROR:
1992 # Error in execution
1993 clearResults = main.FALSE
1994 else:
1995 # unexpected result
1996 clearResults = main.FALSE
1997 if clearResults != main.TRUE:
1998 main.log.error( "Error executing set clear" )
1999
2000 # Check if set is still correct
2001 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002002 getResponses = main.Cluster.command( "setTestGet",
2003 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002004 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002005 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002006 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002007 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002008 current = set( getResponses[ i ] )
2009 if len( current ) == len( getResponses[ i ] ):
2010 # no repeats
2011 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002012 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002013 " of set " + main.onosSetName + ":\n" +
2014 str( getResponses[ i ] ) )
2015 main.log.debug( "Expected: " + str( main.onosSet ) )
2016 main.log.debug( "Actual: " + str( current ) )
2017 getResults = main.FALSE
2018 else:
2019 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002020 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002021 " set " + main.onosSetName + ":\n" +
2022 str( getResponses[ i ] ) )
2023 getResults = main.FALSE
2024 elif getResponses[ i ] == main.ERROR:
2025 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002026 sizeResponses = main.Cluster.command( "setTestSize",
2027 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002028 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002029 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002030 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002031 if size != sizeResponses[ i ]:
2032 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002033 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002034 " for set " + main.onosSetName +
2035 " but got " + str( sizeResponses[ i ] ) )
2036 clearResults = clearResults and getResults and sizeResults
2037 utilities.assert_equals( expect=main.TRUE,
2038 actual=clearResults,
2039 onpass="Set clear correct",
2040 onfail="Set clear was incorrect" )
2041
2042 main.step( "Distributed Set addAll()" )
2043 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002044 addResponses = main.Cluster.command( "setTestAdd",
2045 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002046 # main.TRUE = successfully changed the set
2047 # main.FALSE = action resulted in no change in set
2048 # main.ERROR - Some error in executing the function
2049 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002050 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002051 if addResponses[ i ] == main.TRUE:
2052 # All is well
2053 pass
2054 elif addResponses[ i ] == main.FALSE:
2055 # Already in set, probably fine
2056 pass
2057 elif addResponses[ i ] == main.ERROR:
2058 # Error in execution
2059 addAllResults = main.FALSE
2060 else:
2061 # unexpected result
2062 addAllResults = main.FALSE
2063 if addAllResults != main.TRUE:
2064 main.log.error( "Error executing set addAll" )
2065
2066 # Check if set is still correct
2067 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002068 getResponses = main.Cluster.command( "setTestGet",
2069 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002070 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002071 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002072 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002073 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002074 current = set( getResponses[ i ] )
2075 if len( current ) == len( getResponses[ i ] ):
2076 # no repeats
2077 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002078 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002079 " of set " + main.onosSetName + ":\n" +
2080 str( getResponses[ i ] ) )
2081 main.log.debug( "Expected: " + str( main.onosSet ) )
2082 main.log.debug( "Actual: " + str( current ) )
2083 getResults = main.FALSE
2084 else:
2085 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002086 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002087 " set " + main.onosSetName + ":\n" +
2088 str( getResponses[ i ] ) )
2089 getResults = main.FALSE
2090 elif getResponses[ i ] == main.ERROR:
2091 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002092 sizeResponses = main.Cluster.command( "setTestSize",
2093 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002094 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002095 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002096 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002097 if size != sizeResponses[ i ]:
2098 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002099 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002100 " for set " + main.onosSetName +
2101 " but got " + str( sizeResponses[ i ] ) )
2102 addAllResults = addAllResults and getResults and sizeResults
2103 utilities.assert_equals( expect=main.TRUE,
2104 actual=addAllResults,
2105 onpass="Set addAll correct",
2106 onfail="Set addAll was incorrect" )
2107
2108 main.step( "Distributed Set retain()" )
2109 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002110 retainResponses = main.Cluster.command( "setTestRemove",
2111 args=[ main.onosSetName, retainValue ],
2112 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002113 # main.TRUE = successfully changed the set
2114 # main.FALSE = action resulted in no change in set
2115 # main.ERROR - Some error in executing the function
2116 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002117 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002118 if retainResponses[ i ] == main.TRUE:
2119 # All is well
2120 pass
2121 elif retainResponses[ i ] == main.FALSE:
2122 # Already in set, probably fine
2123 pass
2124 elif retainResponses[ i ] == main.ERROR:
2125 # Error in execution
2126 retainResults = main.FALSE
2127 else:
2128 # unexpected result
2129 retainResults = main.FALSE
2130 if retainResults != main.TRUE:
2131 main.log.error( "Error executing set retain" )
2132
2133 # Check if set is still correct
2134 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002135 getResponses = main.Cluster.command( "setTestGet",
2136 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002137 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002138 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002139 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002140 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002141 current = set( getResponses[ i ] )
2142 if len( current ) == len( getResponses[ i ] ):
2143 # no repeats
2144 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002145 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002146 " of set " + main.onosSetName + ":\n" +
2147 str( getResponses[ i ] ) )
2148 main.log.debug( "Expected: " + str( main.onosSet ) )
2149 main.log.debug( "Actual: " + str( current ) )
2150 getResults = main.FALSE
2151 else:
2152 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002153 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002154 " set " + main.onosSetName + ":\n" +
2155 str( getResponses[ i ] ) )
2156 getResults = main.FALSE
2157 elif getResponses[ i ] == main.ERROR:
2158 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002159 sizeResponses = main.Cluster.command( "setTestSize",
2160 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002161 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002162 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002163 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002164 if size != sizeResponses[ i ]:
2165 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002166 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002167 str( size ) + " for set " + main.onosSetName +
2168 " but got " + str( sizeResponses[ i ] ) )
2169 retainResults = retainResults and getResults and sizeResults
2170 utilities.assert_equals( expect=main.TRUE,
2171 actual=retainResults,
2172 onpass="Set retain correct",
2173 onfail="Set retain was incorrect" )
2174
2175 # Transactional maps
2176 main.step( "Partitioned Transactional maps put" )
2177 tMapValue = "Testing"
2178 numKeys = 100
2179 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002180 ctrl = main.Cluster.next()
2181 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002182 if putResponses and len( putResponses ) == 100:
2183 for i in putResponses:
2184 if putResponses[ i ][ 'value' ] != tMapValue:
2185 putResult = False
2186 else:
2187 putResult = False
2188 if not putResult:
2189 main.log.debug( "Put response values: " + str( putResponses ) )
2190 utilities.assert_equals( expect=True,
2191 actual=putResult,
2192 onpass="Partitioned Transactional Map put successful",
2193 onfail="Partitioned Transactional Map put values are incorrect" )
2194
2195 main.step( "Partitioned Transactional maps get" )
2196 # FIXME: is this sleep needed?
2197 time.sleep( 5 )
2198
2199 getCheck = True
2200 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002201 getResponses = main.Cluster.command( "transactionalMapGet",
2202 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002203 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002204 for node in getResponses:
2205 if node != tMapValue:
2206 valueCheck = False
2207 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002208 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002209 main.log.warn( getResponses )
2210 getCheck = getCheck and valueCheck
2211 utilities.assert_equals( expect=True,
2212 actual=getCheck,
2213 onpass="Partitioned Transactional Map get values were correct",
2214 onfail="Partitioned Transactional Map values incorrect" )
2215
2216 # DISTRIBUTED ATOMIC VALUE
2217 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002218 getValues = main.Cluster.command( "valueTestGet",
2219 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002220 main.log.debug( getValues )
2221 # Check the results
2222 atomicValueGetResult = True
2223 expected = valueValue if valueValue is not None else "null"
2224 main.log.debug( "Checking for value of " + expected )
2225 for i in getValues:
2226 if i != expected:
2227 atomicValueGetResult = False
2228 utilities.assert_equals( expect=True,
2229 actual=atomicValueGetResult,
2230 onpass="Atomic Value get successful",
2231 onfail="Error getting atomic Value " +
2232 str( valueValue ) + ", found: " +
2233 str( getValues ) )
2234
2235 main.step( "Atomic Value set()" )
2236 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002237 setValues = main.Cluster.command( "valueTestSet",
2238 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002239 main.log.debug( setValues )
2240 # Check the results
2241 atomicValueSetResults = True
2242 for i in setValues:
2243 if i != main.TRUE:
2244 atomicValueSetResults = False
2245 utilities.assert_equals( expect=True,
2246 actual=atomicValueSetResults,
2247 onpass="Atomic Value set successful",
2248 onfail="Error setting atomic Value" +
2249 str( setValues ) )
2250
2251 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002252 getValues = main.Cluster.command( "valueTestGet",
2253 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002254 main.log.debug( getValues )
2255 # Check the results
2256 atomicValueGetResult = True
2257 expected = valueValue if valueValue is not None else "null"
2258 main.log.debug( "Checking for value of " + expected )
2259 for i in getValues:
2260 if i != expected:
2261 atomicValueGetResult = False
2262 utilities.assert_equals( expect=True,
2263 actual=atomicValueGetResult,
2264 onpass="Atomic Value get successful",
2265 onfail="Error getting atomic Value " +
2266 str( valueValue ) + ", found: " +
2267 str( getValues ) )
2268
2269 main.step( "Atomic Value compareAndSet()" )
2270 oldValue = valueValue
2271 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002272 ctrl = main.Cluster.next()
2273 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002274 main.log.debug( CASValue )
2275 utilities.assert_equals( expect=main.TRUE,
2276 actual=CASValue,
2277 onpass="Atomic Value comapreAndSet successful",
2278 onfail="Error setting atomic Value:" +
2279 str( CASValue ) )
2280
2281 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002282 getValues = main.Cluster.command( "valueTestGet",
2283 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002284 main.log.debug( getValues )
2285 # Check the results
2286 atomicValueGetResult = True
2287 expected = valueValue if valueValue is not None else "null"
2288 main.log.debug( "Checking for value of " + expected )
2289 for i in getValues:
2290 if i != expected:
2291 atomicValueGetResult = False
2292 utilities.assert_equals( expect=True,
2293 actual=atomicValueGetResult,
2294 onpass="Atomic Value get successful",
2295 onfail="Error getting atomic Value " +
2296 str( valueValue ) + ", found: " +
2297 str( getValues ) )
2298
2299 main.step( "Atomic Value getAndSet()" )
2300 oldValue = valueValue
2301 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002302 ctrl = main.Cluster.next()
2303 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002304 main.log.debug( GASValue )
2305 expected = oldValue if oldValue is not None else "null"
2306 utilities.assert_equals( expect=expected,
2307 actual=GASValue,
2308 onpass="Atomic Value GAS successful",
2309 onfail="Error with GetAndSet atomic Value: expected " +
2310 str( expected ) + ", found: " +
2311 str( GASValue ) )
2312
2313 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002314 getValues = main.Cluster.command( "valueTestGet",
2315 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002316 main.log.debug( getValues )
2317 # Check the results
2318 atomicValueGetResult = True
2319 expected = valueValue if valueValue is not None else "null"
2320 main.log.debug( "Checking for value of " + expected )
2321 for i in getValues:
2322 if i != expected:
2323 atomicValueGetResult = False
2324 utilities.assert_equals( expect=True,
2325 actual=atomicValueGetResult,
2326 onpass="Atomic Value get successful",
2327 onfail="Error getting atomic Value: expected " +
2328 str( valueValue ) + ", found: " +
2329 str( getValues ) )
2330
2331 main.step( "Atomic Value destory()" )
2332 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002333 ctrl = main.Cluster.next()
2334 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002335 main.log.debug( destroyResult )
2336 # Check the results
2337 utilities.assert_equals( expect=main.TRUE,
2338 actual=destroyResult,
2339 onpass="Atomic Value destroy successful",
2340 onfail="Error destroying atomic Value" )
2341
2342 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002343 getValues = main.Cluster.command( "valueTestGet",
2344 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002345 main.log.debug( getValues )
2346 # Check the results
2347 atomicValueGetResult = True
2348 expected = valueValue if valueValue is not None else "null"
2349 main.log.debug( "Checking for value of " + expected )
2350 for i in getValues:
2351 if i != expected:
2352 atomicValueGetResult = False
2353 utilities.assert_equals( expect=True,
2354 actual=atomicValueGetResult,
2355 onpass="Atomic Value get successful",
2356 onfail="Error getting atomic Value " +
2357 str( valueValue ) + ", found: " +
2358 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002359
2360 # WORK QUEUES
2361 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002362 ctrl = main.Cluster.next()
2363 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002364 workQueuePending += 1
2365 main.log.debug( addResult )
2366 # Check the results
2367 utilities.assert_equals( expect=main.TRUE,
2368 actual=addResult,
2369 onpass="Work Queue add successful",
2370 onfail="Error adding to Work Queue" )
2371
2372 main.step( "Check the work queue stats" )
2373 statsResults = self.workQueueStatsCheck( workQueueName,
2374 workQueueCompleted,
2375 workQueueInProgress,
2376 workQueuePending )
2377 utilities.assert_equals( expect=True,
2378 actual=statsResults,
2379 onpass="Work Queue stats correct",
2380 onfail="Work Queue stats incorrect " )
2381
2382 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002383 ctrl = main.Cluster.next()
2384 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002385 workQueuePending += 2
2386 main.log.debug( addMultipleResult )
2387 # Check the results
2388 utilities.assert_equals( expect=main.TRUE,
2389 actual=addMultipleResult,
2390 onpass="Work Queue add multiple successful",
2391 onfail="Error adding multiple items to Work Queue" )
2392
2393 main.step( "Check the work queue stats" )
2394 statsResults = self.workQueueStatsCheck( workQueueName,
2395 workQueueCompleted,
2396 workQueueInProgress,
2397 workQueuePending )
2398 utilities.assert_equals( expect=True,
2399 actual=statsResults,
2400 onpass="Work Queue stats correct",
2401 onfail="Work Queue stats incorrect " )
2402
2403 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002404 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002405 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002406 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002407 workQueuePending -= number
2408 workQueueCompleted += number
2409 main.log.debug( take1Result )
2410 # Check the results
2411 utilities.assert_equals( expect=main.TRUE,
2412 actual=take1Result,
2413 onpass="Work Queue takeAndComplete 1 successful",
2414 onfail="Error taking 1 from Work Queue" )
2415
2416 main.step( "Check the work queue stats" )
2417 statsResults = self.workQueueStatsCheck( workQueueName,
2418 workQueueCompleted,
2419 workQueueInProgress,
2420 workQueuePending )
2421 utilities.assert_equals( expect=True,
2422 actual=statsResults,
2423 onpass="Work Queue stats correct",
2424 onfail="Work Queue stats incorrect " )
2425
2426 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002427 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002428 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002429 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002430 workQueuePending -= number
2431 workQueueCompleted += number
2432 main.log.debug( take2Result )
2433 # Check the results
2434 utilities.assert_equals( expect=main.TRUE,
2435 actual=take2Result,
2436 onpass="Work Queue takeAndComplete 2 successful",
2437 onfail="Error taking 2 from Work Queue" )
2438
2439 main.step( "Check the work queue stats" )
2440 statsResults = self.workQueueStatsCheck( workQueueName,
2441 workQueueCompleted,
2442 workQueueInProgress,
2443 workQueuePending )
2444 utilities.assert_equals( expect=True,
2445 actual=statsResults,
2446 onpass="Work Queue stats correct",
2447 onfail="Work Queue stats incorrect " )
2448
2449 main.step( "Work Queue destroy()" )
2450 valueValue = None
2451 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002452 ctrl = main.Cluster.next()
2453 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002454 workQueueCompleted = 0
2455 workQueueInProgress = 0
2456 workQueuePending = 0
2457 main.log.debug( destroyResult )
2458 # Check the results
2459 utilities.assert_equals( expect=main.TRUE,
2460 actual=destroyResult,
2461 onpass="Work Queue destroy successful",
2462 onfail="Error destroying Work Queue" )
2463
2464 main.step( "Check the work queue stats" )
2465 statsResults = self.workQueueStatsCheck( workQueueName,
2466 workQueueCompleted,
2467 workQueueInProgress,
2468 workQueuePending )
2469 utilities.assert_equals( expect=True,
2470 actual=statsResults,
2471 onpass="Work Queue stats correct",
2472 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002473 except Exception as e:
2474 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002475
2476 def cleanUp( self, main ):
2477 """
2478 Clean up
2479 """
2480 import os
2481 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002482 assert main, "main not defined"
2483 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002484
2485 # printing colors to terminal
2486 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2487 'blue': '\033[94m', 'green': '\033[92m',
2488 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002489
Devin Lim58046fa2017-07-05 16:55:00 -07002490 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002491
2492 main.step( "Checking raft log size" )
2493 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2494 # get compacted periodically
2495 logCheck = main.Cluster.checkPartitionSize()
2496 utilities.assert_equals( expect=True, actual=logCheck,
2497 onpass="Raft log size is not too big",
2498 onfail="Raft logs grew too big" )
2499
Devin Lim58046fa2017-07-05 16:55:00 -07002500 main.step( "Killing tcpdumps" )
2501 main.Mininet2.stopTcpdump()
2502
2503 testname = main.TEST
2504 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2505 main.step( "Copying MN pcap and ONOS log files to test station" )
2506 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2507 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2508 # NOTE: MN Pcap file is being saved to logdir.
2509 # We scp this file as MN and TestON aren't necessarily the same vm
2510
2511 # FIXME: To be replaced with a Jenkin's post script
2512 # TODO: Load these from params
2513 # NOTE: must end in /
2514 logFolder = "/opt/onos/log/"
2515 logFiles = [ "karaf.log", "karaf.log.1" ]
2516 # NOTE: must end in /
2517 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002518 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002519 dstName = main.logdir + "/" + ctrl.name + "-" + f
2520 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002521 logFolder + f, dstName )
2522 # std*.log's
2523 # NOTE: must end in /
2524 logFolder = "/opt/onos/var/"
2525 logFiles = [ "stderr.log", "stdout.log" ]
2526 # NOTE: must end in /
2527 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002528 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002529 dstName = main.logdir + "/" + ctrl.name + "-" + f
2530 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002531 logFolder + f, dstName )
2532 else:
2533 main.log.debug( "skipping saving log files" )
2534
2535 main.step( "Stopping Mininet" )
2536 mnResult = main.Mininet1.stopNet()
2537 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2538 onpass="Mininet stopped",
2539 onfail="MN cleanup NOT successful" )
2540
2541 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002542 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002543 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2544 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002545
2546 try:
2547 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2548 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2549 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2550 timerLog.close()
2551 except NameError as e:
2552 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002553
Devin Lim58046fa2017-07-05 16:55:00 -07002554 def assignMastership( self, main ):
2555 """
2556 Assign mastership to controllers
2557 """
2558 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002559 assert main, "main not defined"
2560 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002561
2562 main.case( "Assigning Controller roles for switches" )
2563 main.caseExplanation = "Check that ONOS is connected to each " +\
2564 "device. Then manually assign" +\
2565 " mastership to specific ONOS nodes using" +\
2566 " 'device-role'"
2567 main.step( "Assign mastership of switches to specific controllers" )
2568 # Manually assign mastership to the controller we want
2569 roleCall = main.TRUE
2570
2571 ipList = []
2572 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002573 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002574 try:
2575 # Assign mastership to specific controllers. This assignment was
2576 # determined for a 7 node cluser, but will work with any sized
2577 # cluster
2578 for i in range( 1, 29 ): # switches 1 through 28
2579 # set up correct variables:
2580 if i == 1:
2581 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002582 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002583 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2584 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002585 c = 1 % main.Cluster.numCtrls
2586 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002587 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2588 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002589 c = 1 % main.Cluster.numCtrls
2590 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002591 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2592 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002593 c = 3 % main.Cluster.numCtrls
2594 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002595 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2596 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002597 c = 2 % main.Cluster.numCtrls
2598 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002599 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2600 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002601 c = 2 % main.Cluster.numCtrls
2602 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002603 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2604 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002605 c = 5 % main.Cluster.numCtrls
2606 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002607 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2608 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002609 c = 4 % main.Cluster.numCtrls
2610 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002611 dpid = '3' + str( i ).zfill( 3 )
2612 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2613 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002614 c = 6 % main.Cluster.numCtrls
2615 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002616 dpid = '6' + str( i ).zfill( 3 )
2617 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2618 elif i == 28:
2619 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002620 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002621 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2622 else:
2623 main.log.error( "You didn't write an else statement for " +
2624 "switch s" + str( i ) )
2625 roleCall = main.FALSE
2626 # Assign switch
2627 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2628 # TODO: make this controller dynamic
2629 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2630 ipList.append( ip )
2631 deviceList.append( deviceId )
2632 except ( AttributeError, AssertionError ):
2633 main.log.exception( "Something is wrong with ONOS device view" )
2634 main.log.info( onosCli.devices() )
2635 utilities.assert_equals(
2636 expect=main.TRUE,
2637 actual=roleCall,
2638 onpass="Re-assigned switch mastership to designated controller",
2639 onfail="Something wrong with deviceRole calls" )
2640
2641 main.step( "Check mastership was correctly assigned" )
2642 roleCheck = main.TRUE
2643 # NOTE: This is due to the fact that device mastership change is not
2644 # atomic and is actually a multi step process
2645 time.sleep( 5 )
2646 for i in range( len( ipList ) ):
2647 ip = ipList[ i ]
2648 deviceId = deviceList[ i ]
2649 # Check assignment
2650 master = onosCli.getRole( deviceId ).get( 'master' )
2651 if ip in master:
2652 roleCheck = roleCheck and main.TRUE
2653 else:
2654 roleCheck = roleCheck and main.FALSE
2655 main.log.error( "Error, controller " + ip + " is not" +
2656 " master " + "of device " +
2657 str( deviceId ) + ". Master is " +
2658 repr( master ) + "." )
2659 utilities.assert_equals(
2660 expect=main.TRUE,
2661 actual=roleCheck,
2662 onpass="Switches were successfully reassigned to designated " +
2663 "controller",
2664 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002665
Devin Lim58046fa2017-07-05 16:55:00 -07002666 def bringUpStoppedNode( self, main ):
2667 """
2668 The bring up stopped nodes
2669 """
2670 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002671 assert main, "main not defined"
2672 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002673 assert main.kill, "main.kill not defined"
2674 main.case( "Restart minority of ONOS nodes" )
2675
2676 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2677 startResults = main.TRUE
2678 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002679 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002680 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002681 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002682 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2683 onpass="ONOS nodes started successfully",
2684 onfail="ONOS nodes NOT successfully started" )
2685
2686 main.step( "Checking if ONOS is up yet" )
2687 count = 0
2688 onosIsupResult = main.FALSE
2689 while onosIsupResult == main.FALSE and count < 10:
2690 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002691 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002692 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002693 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002694 count = count + 1
2695 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2696 onpass="ONOS restarted successfully",
2697 onfail="ONOS restart NOT successful" )
2698
Jon Hallca319892017-06-15 15:25:22 -07002699 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002700 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002701 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002702 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002703 ctrl.startOnosCli( ctrl.ipAddress )
2704 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002705 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002706 onpass="ONOS node(s) restarted",
2707 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002708
2709 # Grab the time of restart so we chan check how long the gossip
2710 # protocol has had time to work
2711 main.restartTime = time.time() - restartTime
2712 main.log.debug( "Restart time: " + str( main.restartTime ) )
2713 # TODO: MAke this configurable. Also, we are breaking the above timer
2714 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002715 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002716 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002717 sleep=15,
2718 attempts=5 )
2719
2720 utilities.assert_equals( expect=True, actual=nodeResults,
2721 onpass="Nodes check successful",
2722 onfail="Nodes check NOT successful" )
2723
2724 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002725 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002726 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002727 ctrl.name,
2728 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002729 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002730 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002731
Jon Hallca319892017-06-15 15:25:22 -07002732 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002733
2734 main.step( "Rerun for election on the node(s) that were killed" )
2735 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002736 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002737 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002738 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002739 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2740 onpass="ONOS nodes reran for election topic",
2741 onfail="Errror rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002742
Devin Lim142b5342017-07-20 15:22:39 -07002743 def tempCell( self, cellName, ipList ):
2744 main.step( "Create cell file" )
2745 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002746
Devin Lim142b5342017-07-20 15:22:39 -07002747 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2748 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002749 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002750 main.step( "Applying cell variable to environment" )
2751 cellResult = main.ONOSbench.setCell( cellName )
2752 verifyResult = main.ONOSbench.verifyCell()
2753
Devin Lim142b5342017-07-20 15:22:39 -07002754 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002755 """
2756 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002757 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002758 1: scaling
2759 """
2760 """
2761 Check state after ONOS failure/scaling
2762 """
2763 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002764 assert main, "main not defined"
2765 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002766 main.case( "Running ONOS Constant State Tests" )
2767
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002768 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002769
Devin Lim58046fa2017-07-05 16:55:00 -07002770 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002771 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002772
Devin Lim142b5342017-07-20 15:22:39 -07002773 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002774 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002775
2776 if rolesResults and not consistentMastership:
2777 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002778 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002779 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002780 json.dumps( json.loads( ONOSMastership[ i ] ),
2781 sort_keys=True,
2782 indent=4,
2783 separators=( ',', ': ' ) ) )
2784
2785 if compareSwitch:
2786 description2 = "Compare switch roles from before failure"
2787 main.step( description2 )
2788 try:
2789 currentJson = json.loads( ONOSMastership[ 0 ] )
2790 oldJson = json.loads( mastershipState )
2791 except ( ValueError, TypeError ):
2792 main.log.exception( "Something is wrong with parsing " +
2793 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002794 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2795 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002796 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002797 mastershipCheck = main.TRUE
2798 for i in range( 1, 29 ):
2799 switchDPID = str(
2800 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2801 current = [ switch[ 'master' ] for switch in currentJson
2802 if switchDPID in switch[ 'id' ] ]
2803 old = [ switch[ 'master' ] for switch in oldJson
2804 if switchDPID in switch[ 'id' ] ]
2805 if current == old:
2806 mastershipCheck = mastershipCheck and main.TRUE
2807 else:
2808 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2809 mastershipCheck = main.FALSE
2810 utilities.assert_equals(
2811 expect=main.TRUE,
2812 actual=mastershipCheck,
2813 onpass="Mastership of Switches was not changed",
2814 onfail="Mastership of some switches changed" )
2815
2816 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002817 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002818 intentCheck = main.FALSE
2819 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002820
2821 main.step( "Check for consistency in Intents from each controller" )
2822 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2823 main.log.info( "Intents are consistent across all ONOS " +
2824 "nodes" )
2825 else:
2826 consistentIntents = False
2827
2828 # Try to make it easy to figure out what is happening
2829 #
2830 # Intent ONOS1 ONOS2 ...
2831 # 0x01 INSTALLED INSTALLING
2832 # ... ... ...
2833 # ... ... ...
2834 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002835 for ctrl in main.Cluster.active():
2836 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002837 main.log.warn( title )
2838 # get all intent keys in the cluster
2839 keys = []
2840 for nodeStr in ONOSIntents:
2841 node = json.loads( nodeStr )
2842 for intent in node:
2843 keys.append( intent.get( 'id' ) )
2844 keys = set( keys )
2845 for key in keys:
2846 row = "%-13s" % key
2847 for nodeStr in ONOSIntents:
2848 node = json.loads( nodeStr )
2849 for intent in node:
2850 if intent.get( 'id' ) == key:
2851 row += "%-15s" % intent.get( 'state' )
2852 main.log.warn( row )
2853 # End table view
2854
2855 utilities.assert_equals(
2856 expect=True,
2857 actual=consistentIntents,
2858 onpass="Intents are consistent across all ONOS nodes",
2859 onfail="ONOS nodes have different views of intents" )
2860 intentStates = []
2861 for node in ONOSIntents: # Iter through ONOS nodes
2862 nodeStates = []
2863 # Iter through intents of a node
2864 try:
2865 for intent in json.loads( node ):
2866 nodeStates.append( intent[ 'state' ] )
2867 except ( ValueError, TypeError ):
2868 main.log.exception( "Error in parsing intents" )
2869 main.log.error( repr( node ) )
2870 intentStates.append( nodeStates )
2871 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2872 main.log.info( dict( out ) )
2873
2874 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002875 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002876 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002877 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002878 main.log.warn( json.dumps(
2879 json.loads( ONOSIntents[ i ] ),
2880 sort_keys=True,
2881 indent=4,
2882 separators=( ',', ': ' ) ) )
2883 elif intentsResults and consistentIntents:
2884 intentCheck = main.TRUE
2885
2886 # NOTE: Store has no durability, so intents are lost across system
2887 # restarts
2888 if not isRestart:
2889 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2890 # NOTE: this requires case 5 to pass for intentState to be set.
2891 # maybe we should stop the test if that fails?
2892 sameIntents = main.FALSE
2893 try:
2894 intentState
2895 except NameError:
2896 main.log.warn( "No previous intent state was saved" )
2897 else:
2898 if intentState and intentState == ONOSIntents[ 0 ]:
2899 sameIntents = main.TRUE
2900 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2901 # TODO: possibly the states have changed? we may need to figure out
2902 # what the acceptable states are
2903 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2904 sameIntents = main.TRUE
2905 try:
2906 before = json.loads( intentState )
2907 after = json.loads( ONOSIntents[ 0 ] )
2908 for intent in before:
2909 if intent not in after:
2910 sameIntents = main.FALSE
2911 main.log.debug( "Intent is not currently in ONOS " +
2912 "(at least in the same form):" )
2913 main.log.debug( json.dumps( intent ) )
2914 except ( ValueError, TypeError ):
2915 main.log.exception( "Exception printing intents" )
2916 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2917 main.log.debug( repr( intentState ) )
2918 if sameIntents == main.FALSE:
2919 try:
2920 main.log.debug( "ONOS intents before: " )
2921 main.log.debug( json.dumps( json.loads( intentState ),
2922 sort_keys=True, indent=4,
2923 separators=( ',', ': ' ) ) )
2924 main.log.debug( "Current ONOS intents: " )
2925 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2926 sort_keys=True, indent=4,
2927 separators=( ',', ': ' ) ) )
2928 except ( ValueError, TypeError ):
2929 main.log.exception( "Exception printing intents" )
2930 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2931 main.log.debug( repr( intentState ) )
2932 utilities.assert_equals(
2933 expect=main.TRUE,
2934 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002935 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07002936 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2937 intentCheck = intentCheck and sameIntents
2938
2939 main.step( "Get the OF Table entries and compare to before " +
2940 "component " + OnosAfterWhich[ afterWhich ] )
2941 FlowTables = main.TRUE
2942 for i in range( 28 ):
2943 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2944 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2945 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2946 FlowTables = FlowTables and curSwitch
2947 if curSwitch == main.FALSE:
2948 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2949 utilities.assert_equals(
2950 expect=main.TRUE,
2951 actual=FlowTables,
2952 onpass="No changes were found in the flow tables",
2953 onfail="Changes were found in the flow tables" )
2954
Jon Hallca319892017-06-15 15:25:22 -07002955 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07002956 """
2957 main.step( "Check the continuous pings to ensure that no packets " +
2958 "were dropped during component failure" )
2959 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2960 main.params[ 'TESTONIP' ] )
2961 LossInPings = main.FALSE
2962 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2963 for i in range( 8, 18 ):
2964 main.log.info(
2965 "Checking for a loss in pings along flow from s" +
2966 str( i ) )
2967 LossInPings = main.Mininet2.checkForLoss(
2968 "/tmp/ping.h" +
2969 str( i ) ) or LossInPings
2970 if LossInPings == main.TRUE:
2971 main.log.info( "Loss in ping detected" )
2972 elif LossInPings == main.ERROR:
2973 main.log.info( "There are multiple mininet process running" )
2974 elif LossInPings == main.FALSE:
2975 main.log.info( "No Loss in the pings" )
2976 main.log.info( "No loss of dataplane connectivity" )
2977 utilities.assert_equals(
2978 expect=main.FALSE,
2979 actual=LossInPings,
2980 onpass="No Loss of connectivity",
2981 onfail="Loss of dataplane connectivity detected" )
2982 # NOTE: Since intents are not persisted with IntnentStore,
2983 # we expect loss in dataplane connectivity
2984 LossInPings = main.FALSE
2985 """
Devin Lim58046fa2017-07-05 16:55:00 -07002986 def compareTopo( self, main ):
2987 """
2988 Compare topo
2989 """
2990 import json
2991 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002992 assert main, "main not defined"
2993 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002994 try:
2995 from tests.dependencies.topology import Topology
2996 except ImportError:
2997 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07002998 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002999 try:
3000 main.topoRelated
3001 except ( NameError, AttributeError ):
3002 main.topoRelated = Topology()
3003 main.case( "Compare ONOS Topology view to Mininet topology" )
3004 main.caseExplanation = "Compare topology objects between Mininet" +\
3005 " and ONOS"
3006 topoResult = main.FALSE
3007 topoFailMsg = "ONOS topology don't match Mininet"
3008 elapsed = 0
3009 count = 0
3010 main.step( "Comparing ONOS topology to MN topology" )
3011 startTime = time.time()
3012 # Give time for Gossip to work
3013 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3014 devicesResults = main.TRUE
3015 linksResults = main.TRUE
3016 hostsResults = main.TRUE
3017 hostAttachmentResults = True
3018 count += 1
3019 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003020 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003021 kwargs={ 'sleep': 5, 'attempts': 5,
3022 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003023 ipResult = main.TRUE
3024
Devin Lim142b5342017-07-20 15:22:39 -07003025 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003026 kwargs={ 'sleep': 5, 'attempts': 5,
3027 'randomTime': True },
3028 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003029
3030 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003031 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003032 if hosts[ controller ]:
3033 for host in hosts[ controller ]:
3034 if host is None or host.get( 'ipAddresses', [] ) == []:
3035 main.log.error(
3036 "Error with host ipAddresses on controller" +
3037 controllerStr + ": " + str( host ) )
3038 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003039 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003040 kwargs={ 'sleep': 5, 'attempts': 5,
3041 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003042 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003043 kwargs={ 'sleep': 5, 'attempts': 5,
3044 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003045 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003046 kwargs={ 'sleep': 5, 'attempts': 5,
3047 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003048
3049 elapsed = time.time() - startTime
3050 cliTime = time.time() - cliStart
3051 print "Elapsed time: " + str( elapsed )
3052 print "CLI time: " + str( cliTime )
3053
3054 if all( e is None for e in devices ) and\
3055 all( e is None for e in hosts ) and\
3056 all( e is None for e in ports ) and\
3057 all( e is None for e in links ) and\
3058 all( e is None for e in clusters ):
3059 topoFailMsg = "Could not get topology from ONOS"
3060 main.log.error( topoFailMsg )
3061 continue # Try again, No use trying to compare
3062
3063 mnSwitches = main.Mininet1.getSwitches()
3064 mnLinks = main.Mininet1.getLinks()
3065 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003066 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003067 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003068 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3069 controller,
3070 mnSwitches,
3071 devices,
3072 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003073 utilities.assert_equals( expect=main.TRUE,
3074 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003075 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003076 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003077 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003078 " Switches view is incorrect" )
3079
Devin Lim58046fa2017-07-05 16:55:00 -07003080 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003081 main.Mininet1.compareLinks,
3082 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003083 utilities.assert_equals( expect=main.TRUE,
3084 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003085 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003086 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003087 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003088 " links view is incorrect" )
3089 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3090 currentHostsResult = main.Mininet1.compareHosts(
3091 mnHosts,
3092 hosts[ controller ] )
3093 elif hosts[ controller ] == []:
3094 currentHostsResult = main.TRUE
3095 else:
3096 currentHostsResult = main.FALSE
3097 utilities.assert_equals( expect=main.TRUE,
3098 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003099 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003100 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003101 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003102 " hosts don't match Mininet" )
3103 # CHECKING HOST ATTACHMENT POINTS
3104 hostAttachment = True
3105 zeroHosts = False
3106 # FIXME: topo-HA/obelisk specific mappings:
3107 # key is mac and value is dpid
3108 mappings = {}
3109 for i in range( 1, 29 ): # hosts 1 through 28
3110 # set up correct variables:
3111 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3112 if i == 1:
3113 deviceId = "1000".zfill( 16 )
3114 elif i == 2:
3115 deviceId = "2000".zfill( 16 )
3116 elif i == 3:
3117 deviceId = "3000".zfill( 16 )
3118 elif i == 4:
3119 deviceId = "3004".zfill( 16 )
3120 elif i == 5:
3121 deviceId = "5000".zfill( 16 )
3122 elif i == 6:
3123 deviceId = "6000".zfill( 16 )
3124 elif i == 7:
3125 deviceId = "6007".zfill( 16 )
3126 elif i >= 8 and i <= 17:
3127 dpid = '3' + str( i ).zfill( 3 )
3128 deviceId = dpid.zfill( 16 )
3129 elif i >= 18 and i <= 27:
3130 dpid = '6' + str( i ).zfill( 3 )
3131 deviceId = dpid.zfill( 16 )
3132 elif i == 28:
3133 deviceId = "2800".zfill( 16 )
3134 mappings[ macId ] = deviceId
3135 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3136 if hosts[ controller ] == []:
3137 main.log.warn( "There are no hosts discovered" )
3138 zeroHosts = True
3139 else:
3140 for host in hosts[ controller ]:
3141 mac = None
3142 location = None
3143 device = None
3144 port = None
3145 try:
3146 mac = host.get( 'mac' )
3147 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003148 print host
3149 if 'locations' in host:
3150 location = host.get( 'locations' )[ 0 ]
3151 elif 'location' in host:
3152 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003153 assert location, "location field could not be found for this host object"
3154
3155 # Trim the protocol identifier off deviceId
3156 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3157 assert device, "elementId field could not be found for this host location object"
3158
3159 port = location.get( 'port' )
3160 assert port, "port field could not be found for this host location object"
3161
3162 # Now check if this matches where they should be
3163 if mac and device and port:
3164 if str( port ) != "1":
3165 main.log.error( "The attachment port is incorrect for " +
3166 "host " + str( mac ) +
3167 ". Expected: 1 Actual: " + str( port ) )
3168 hostAttachment = False
3169 if device != mappings[ str( mac ) ]:
3170 main.log.error( "The attachment device is incorrect for " +
3171 "host " + str( mac ) +
3172 ". Expected: " + mappings[ str( mac ) ] +
3173 " Actual: " + device )
3174 hostAttachment = False
3175 else:
3176 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003177 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003178 main.log.exception( "Json object not as expected" )
3179 main.log.error( repr( host ) )
3180 hostAttachment = False
3181 else:
3182 main.log.error( "No hosts json output or \"Error\"" +
3183 " in output. hosts = " +
3184 repr( hosts[ controller ] ) )
3185 if zeroHosts is False:
3186 # TODO: Find a way to know if there should be hosts in a
3187 # given point of the test
3188 hostAttachment = True
3189
3190 # END CHECKING HOST ATTACHMENT POINTS
3191 devicesResults = devicesResults and currentDevicesResult
3192 linksResults = linksResults and currentLinksResult
3193 hostsResults = hostsResults and currentHostsResult
3194 hostAttachmentResults = hostAttachmentResults and\
3195 hostAttachment
3196 topoResult = ( devicesResults and linksResults
3197 and hostsResults and ipResult and
3198 hostAttachmentResults )
3199 utilities.assert_equals( expect=True,
3200 actual=topoResult,
3201 onpass="ONOS topology matches Mininet",
3202 onfail=topoFailMsg )
3203 # End of While loop to pull ONOS state
3204
3205 # Compare json objects for hosts and dataplane clusters
3206
3207 # hosts
3208 main.step( "Hosts view is consistent across all ONOS nodes" )
3209 consistentHostsResult = main.TRUE
3210 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003211 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003212 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3213 if hosts[ controller ] == hosts[ 0 ]:
3214 continue
3215 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003216 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003217 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003218 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003219 consistentHostsResult = main.FALSE
3220
3221 else:
Jon Hallca319892017-06-15 15:25:22 -07003222 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003223 controllerStr )
3224 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003225 main.log.debug( controllerStr +
3226 " hosts response: " +
3227 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003228 utilities.assert_equals(
3229 expect=main.TRUE,
3230 actual=consistentHostsResult,
3231 onpass="Hosts view is consistent across all ONOS nodes",
3232 onfail="ONOS nodes have different views of hosts" )
3233
3234 main.step( "Hosts information is correct" )
3235 hostsResults = hostsResults and ipResult
3236 utilities.assert_equals(
3237 expect=main.TRUE,
3238 actual=hostsResults,
3239 onpass="Host information is correct",
3240 onfail="Host information is incorrect" )
3241
3242 main.step( "Host attachment points to the network" )
3243 utilities.assert_equals(
3244 expect=True,
3245 actual=hostAttachmentResults,
3246 onpass="Hosts are correctly attached to the network",
3247 onfail="ONOS did not correctly attach hosts to the network" )
3248
3249 # Strongly connected clusters of devices
3250 main.step( "Clusters view is consistent across all ONOS nodes" )
3251 consistentClustersResult = main.TRUE
3252 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003253 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003254 if "Error" not in clusters[ controller ]:
3255 if clusters[ controller ] == clusters[ 0 ]:
3256 continue
3257 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003258 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003259 controllerStr +
3260 " is inconsistent with ONOS1" )
3261 consistentClustersResult = main.FALSE
3262 else:
3263 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003264 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003265 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003266 main.log.debug( controllerStr +
3267 " clusters response: " +
3268 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003269 utilities.assert_equals(
3270 expect=main.TRUE,
3271 actual=consistentClustersResult,
3272 onpass="Clusters view is consistent across all ONOS nodes",
3273 onfail="ONOS nodes have different views of clusters" )
3274 if not consistentClustersResult:
3275 main.log.debug( clusters )
3276 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003277 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003278
3279 main.step( "There is only one SCC" )
3280 # there should always only be one cluster
3281 try:
3282 numClusters = len( json.loads( clusters[ 0 ] ) )
3283 except ( ValueError, TypeError ):
3284 main.log.exception( "Error parsing clusters[0]: " +
3285 repr( clusters[ 0 ] ) )
3286 numClusters = "ERROR"
3287 clusterResults = main.FALSE
3288 if numClusters == 1:
3289 clusterResults = main.TRUE
3290 utilities.assert_equals(
3291 expect=1,
3292 actual=numClusters,
3293 onpass="ONOS shows 1 SCC",
3294 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3295
3296 topoResult = ( devicesResults and linksResults
3297 and hostsResults and consistentHostsResult
3298 and consistentClustersResult and clusterResults
3299 and ipResult and hostAttachmentResults )
3300
3301 topoResult = topoResult and int( count <= 2 )
3302 note = "note it takes about " + str( int( cliTime ) ) + \
3303 " seconds for the test to make all the cli calls to fetch " +\
3304 "the topology from each ONOS instance"
3305 main.log.info(
3306 "Very crass estimate for topology discovery/convergence( " +
3307 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3308 str( count ) + " tries" )
3309
3310 main.step( "Device information is correct" )
3311 utilities.assert_equals(
3312 expect=main.TRUE,
3313 actual=devicesResults,
3314 onpass="Device information is correct",
3315 onfail="Device information is incorrect" )
3316
3317 main.step( "Links are correct" )
3318 utilities.assert_equals(
3319 expect=main.TRUE,
3320 actual=linksResults,
3321 onpass="Link are correct",
3322 onfail="Links are incorrect" )
3323
3324 main.step( "Hosts are correct" )
3325 utilities.assert_equals(
3326 expect=main.TRUE,
3327 actual=hostsResults,
3328 onpass="Hosts are correct",
3329 onfail="Hosts are incorrect" )
3330
3331 # FIXME: move this to an ONOS state case
3332 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003333 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003334 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003335 attempts=5 )
3336 utilities.assert_equals( expect=True, actual=nodeResults,
3337 onpass="Nodes check successful",
3338 onfail="Nodes check NOT successful" )
3339 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003340 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003341 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003342 ctrl.name,
3343 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003344
3345 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003346 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003347
Devin Lim58046fa2017-07-05 16:55:00 -07003348 def linkDown( self, main, fromS="s3", toS="s28" ):
3349 """
3350 Link fromS-toS down
3351 """
3352 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003353 assert main, "main not defined"
3354 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003355 # NOTE: You should probably run a topology check after this
3356
3357 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3358
3359 description = "Turn off a link to ensure that Link Discovery " +\
3360 "is working properly"
3361 main.case( description )
3362
3363 main.step( "Kill Link between " + fromS + " and " + toS )
3364 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3365 main.log.info( "Waiting " + str( linkSleep ) +
3366 " seconds for link down to be discovered" )
3367 time.sleep( linkSleep )
3368 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3369 onpass="Link down successful",
3370 onfail="Failed to bring link down" )
3371 # TODO do some sort of check here
3372
3373 def linkUp( self, main, fromS="s3", toS="s28" ):
3374 """
3375 Link fromS-toS up
3376 """
3377 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003378 assert main, "main not defined"
3379 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003380 # NOTE: You should probably run a topology check after this
3381
3382 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3383
3384 description = "Restore a link to ensure that Link Discovery is " + \
3385 "working properly"
3386 main.case( description )
3387
Jon Hall4173b242017-09-12 17:04:38 -07003388 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003389 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3390 main.log.info( "Waiting " + str( linkSleep ) +
3391 " seconds for link up to be discovered" )
3392 time.sleep( linkSleep )
3393 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3394 onpass="Link up successful",
3395 onfail="Failed to bring link up" )
3396
3397 def switchDown( self, main ):
3398 """
3399 Switch Down
3400 """
3401 # NOTE: You should probably run a topology check after this
3402 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003403 assert main, "main not defined"
3404 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003405
3406 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3407
3408 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003409 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003410 main.case( description )
3411 switch = main.params[ 'kill' ][ 'switch' ]
3412 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3413
3414 # TODO: Make this switch parameterizable
3415 main.step( "Kill " + switch )
3416 main.log.info( "Deleting " + switch )
3417 main.Mininet1.delSwitch( switch )
3418 main.log.info( "Waiting " + str( switchSleep ) +
3419 " seconds for switch down to be discovered" )
3420 time.sleep( switchSleep )
3421 device = onosCli.getDevice( dpid=switchDPID )
3422 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003423 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003424 result = main.FALSE
3425 if device and device[ 'available' ] is False:
3426 result = main.TRUE
3427 utilities.assert_equals( expect=main.TRUE, actual=result,
3428 onpass="Kill switch successful",
3429 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003430
Devin Lim58046fa2017-07-05 16:55:00 -07003431 def switchUp( self, main ):
3432 """
3433 Switch Up
3434 """
3435 # NOTE: You should probably run a topology check after this
3436 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003437 assert main, "main not defined"
3438 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003439
3440 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3441 switch = main.params[ 'kill' ][ 'switch' ]
3442 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3443 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003444 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003445 description = "Adding a switch to ensure it is discovered correctly"
3446 main.case( description )
3447
3448 main.step( "Add back " + switch )
3449 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3450 for peer in links:
3451 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003452 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003453 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3454 main.log.info( "Waiting " + str( switchSleep ) +
3455 " seconds for switch up to be discovered" )
3456 time.sleep( switchSleep )
3457 device = onosCli.getDevice( dpid=switchDPID )
3458 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003459 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003460 result = main.FALSE
3461 if device and device[ 'available' ]:
3462 result = main.TRUE
3463 utilities.assert_equals( expect=main.TRUE, actual=result,
3464 onpass="add switch successful",
3465 onfail="Failed to add switch?" )
3466
3467 def startElectionApp( self, main ):
3468 """
3469 start election app on all onos nodes
3470 """
Devin Lim58046fa2017-07-05 16:55:00 -07003471 assert main, "main not defined"
3472 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003473
3474 main.case( "Start Leadership Election app" )
3475 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003476 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003477 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003478 utilities.assert_equals(
3479 expect=main.TRUE,
3480 actual=appResult,
3481 onpass="Election app installed",
3482 onfail="Something went wrong with installing Leadership election" )
3483
3484 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003485 onosCli.electionTestRun()
3486 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003487 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003488 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003489 utilities.assert_equals(
3490 expect=True,
3491 actual=sameResult,
3492 onpass="All nodes see the same leaderboards",
3493 onfail="Inconsistent leaderboards" )
3494
3495 if sameResult:
3496 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003497 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003498 correctLeader = True
3499 else:
3500 correctLeader = False
3501 main.step( "First node was elected leader" )
3502 utilities.assert_equals(
3503 expect=True,
3504 actual=correctLeader,
3505 onpass="Correct leader was elected",
3506 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003507 main.Cluster.testLeader = leader
3508
Devin Lim58046fa2017-07-05 16:55:00 -07003509 def isElectionFunctional( self, main ):
3510 """
3511 Check that Leadership Election is still functional
3512 15.1 Run election on each node
3513 15.2 Check that each node has the same leaders and candidates
3514 15.3 Find current leader and withdraw
3515 15.4 Check that a new node was elected leader
3516 15.5 Check that that new leader was the candidate of old leader
3517 15.6 Run for election on old leader
3518 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3519 15.8 Make sure that the old leader was added to the candidate list
3520
3521 old and new variable prefixes refer to data from before vs after
3522 withdrawl and later before withdrawl vs after re-election
3523 """
3524 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003525 assert main, "main not defined"
3526 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003527
3528 description = "Check that Leadership Election is still functional"
3529 main.case( description )
3530 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3531
3532 oldLeaders = [] # list of lists of each nodes' candidates before
3533 newLeaders = [] # list of lists of each nodes' candidates after
3534 oldLeader = '' # the old leader from oldLeaders, None if not same
3535 newLeader = '' # the new leaders fron newLoeaders, None if not same
3536 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3537 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003538 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003539 expectNoLeader = True
3540
3541 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003542 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003543 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003544 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003545 actual=electionResult,
3546 onpass="All nodes successfully ran for leadership",
3547 onfail="At least one node failed to run for leadership" )
3548
3549 if electionResult == main.FALSE:
3550 main.log.error(
3551 "Skipping Test Case because Election Test App isn't loaded" )
3552 main.skipCase()
3553
3554 main.step( "Check that each node shows the same leader and candidates" )
3555 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003556 activeCLIs = main.Cluster.active()
3557 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003558 if sameResult:
3559 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003560 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003561 else:
3562 oldLeader = None
3563 utilities.assert_equals(
3564 expect=True,
3565 actual=sameResult,
3566 onpass="Leaderboards are consistent for the election topic",
3567 onfail=failMessage )
3568
3569 main.step( "Find current leader and withdraw" )
3570 withdrawResult = main.TRUE
3571 # do some sanity checking on leader before using it
3572 if oldLeader is None:
3573 main.log.error( "Leadership isn't consistent." )
3574 withdrawResult = main.FALSE
3575 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003576 for ctrl in main.Cluster.active():
3577 if oldLeader == ctrl.ipAddress:
3578 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003579 break
3580 else: # FOR/ELSE statement
3581 main.log.error( "Leader election, could not find current leader" )
3582 if oldLeader:
3583 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3584 utilities.assert_equals(
3585 expect=main.TRUE,
3586 actual=withdrawResult,
3587 onpass="Node was withdrawn from election",
3588 onfail="Node was not withdrawn from election" )
3589
3590 main.step( "Check that a new node was elected leader" )
3591 failMessage = "Nodes have different leaders"
3592 # Get new leaders and candidates
3593 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3594 newLeader = None
3595 if newLeaderResult:
3596 if newLeaders[ 0 ][ 0 ] == 'none':
3597 main.log.error( "No leader was elected on at least 1 node" )
3598 if not expectNoLeader:
3599 newLeaderResult = False
3600 newLeader = newLeaders[ 0 ][ 0 ]
3601
3602 # Check that the new leader is not the older leader, which was withdrawn
3603 if newLeader == oldLeader:
3604 newLeaderResult = False
3605 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3606 " as the current leader" )
3607 utilities.assert_equals(
3608 expect=True,
3609 actual=newLeaderResult,
3610 onpass="Leadership election passed",
3611 onfail="Something went wrong with Leadership election" )
3612
3613 main.step( "Check that that new leader was the candidate of old leader" )
3614 # candidates[ 2 ] should become the top candidate after withdrawl
3615 correctCandidateResult = main.TRUE
3616 if expectNoLeader:
3617 if newLeader == 'none':
3618 main.log.info( "No leader expected. None found. Pass" )
3619 correctCandidateResult = main.TRUE
3620 else:
3621 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3622 correctCandidateResult = main.FALSE
3623 elif len( oldLeaders[ 0 ] ) >= 3:
3624 if newLeader == oldLeaders[ 0 ][ 2 ]:
3625 # correct leader was elected
3626 correctCandidateResult = main.TRUE
3627 else:
3628 correctCandidateResult = main.FALSE
3629 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3630 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3631 else:
3632 main.log.warn( "Could not determine who should be the correct leader" )
3633 main.log.debug( oldLeaders[ 0 ] )
3634 correctCandidateResult = main.FALSE
3635 utilities.assert_equals(
3636 expect=main.TRUE,
3637 actual=correctCandidateResult,
3638 onpass="Correct Candidate Elected",
3639 onfail="Incorrect Candidate Elected" )
3640
3641 main.step( "Run for election on old leader( just so everyone " +
3642 "is in the hat )" )
3643 if oldLeaderCLI is not None:
3644 runResult = oldLeaderCLI.electionTestRun()
3645 else:
3646 main.log.error( "No old leader to re-elect" )
3647 runResult = main.FALSE
3648 utilities.assert_equals(
3649 expect=main.TRUE,
3650 actual=runResult,
3651 onpass="App re-ran for election",
3652 onfail="App failed to run for election" )
3653
3654 main.step(
3655 "Check that oldLeader is a candidate, and leader if only 1 node" )
3656 # verify leader didn't just change
3657 # Get new leaders and candidates
3658 reRunLeaders = []
3659 time.sleep( 5 ) # Paremterize
3660 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3661
3662 # Check that the re-elected node is last on the candidate List
3663 if not reRunLeaders[ 0 ]:
3664 positionResult = main.FALSE
3665 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3666 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3667 str( reRunLeaders[ 0 ] ) ) )
3668 positionResult = main.FALSE
3669 utilities.assert_equals(
3670 expect=True,
3671 actual=positionResult,
3672 onpass="Old leader successfully re-ran for election",
3673 onfail="Something went wrong with Leadership election after " +
3674 "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003675
Devin Lim58046fa2017-07-05 16:55:00 -07003676 def installDistributedPrimitiveApp( self, main ):
3677 """
3678 Install Distributed Primitives app
3679 """
3680 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003681 assert main, "main not defined"
3682 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003683
3684 # Variables for the distributed primitives tests
3685 main.pCounterName = "TestON-Partitions"
3686 main.pCounterValue = 0
3687 main.onosSet = set( [] )
3688 main.onosSetName = "TestON-set"
3689
3690 description = "Install Primitives app"
3691 main.case( description )
3692 main.step( "Install Primitives app" )
3693 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003694 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003695 utilities.assert_equals( expect=main.TRUE,
3696 actual=appResults,
3697 onpass="Primitives app activated",
3698 onfail="Primitives app not activated" )
3699 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003700 time.sleep( 5 ) # To allow all nodes to activate