import json
import time
import pexpect


class HA():

    def __init__( self ):
        self.default = ''

    def customizeOnosGenPartitions( self ):
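        """
        Start Mininet and copy a custom onos-gen-partitions script to the
        ONOS bench node so that the generated cluster partitions can be
        customized for this test.
        """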
        self.startingMininet()
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )

    def cleanUpGenPartition( self ):
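        """
        Revert the onos-gen-partitions file on the ONOS bench node to the
        version tracked by git.
        """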
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( "Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

    def startingMininet( self ):
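        """
        Copy the obelisk topology file to the Mininet machine and start
        Mininet.
        """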
        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

    def scalingMetadata( self ):
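        """
        Generate a cluster metadata file for the next scale in main.scaling.
        A scale value containing "e" sets the equal flag when generating
        the file.
        """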
        import re
        main.scaling = main.params[ 'scaling' ].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop( 0 )
        main.log.debug( scale )
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal )
        main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )

    def swapNodeMetadata( self ):
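        """
        Generate a cluster metadata file for a cluster two nodes smaller,
        in preparation for swapping nodes out of the cluster. Requires at
        least five ONOS nodes.
        """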
        if main.numCtrls >= 5:
            main.numCtrls -= 2
        else:
            main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
        genResult = main.Server.generateFile( main.numCtrls )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )

    def customizeOnosService( self, metadataMethod ):
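        """
        Serve the cluster metadata file over HTTP and modify the
        onos-service script so ONOS loads its cluster metadata from that
        URL. metadataMethod is the function used to generate the initial
        metadata file.
        """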
        import os
        main.step( "Setup server for cluster metadata file" )
        main.serverPort = main.params[ 'server' ][ 'port' ]
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=main.serverPort,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failed to start SimpleHTTPServer" )

        main.step( "Generate initial metadata file" )
        metadataMethod()

        self.startingMininet()

        main.step( "Copying backup config files" )
        main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 main.onosServicepath,
                                 main.onosServicepath + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # We need to modify the onos-service file to use the remote URL
        # for the cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )

    def cleanUpOnosService( self ):
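        """
        Restore the original onos-service file from the backup copy.
        """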
        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            main.onosServicepath + ".backup",
                            main.onosServicepath,
                            direction="to" )

    def consistentCheck( self ):
        """
        Checks that TestON counters are consistent across all nodes.

        Returns the tuple ( onosCounters, consistent )
        - onosCounters is the parsed json output of the counters command on
          all nodes
        - consistent is main.TRUE if all "TestON" counters are consistent
          across all nodes, otherwise main.FALSE
        """
        try:
            # Get onos counters results
            onosCountersRaw = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="counters-" + str( i ),
                                 args=[ main.CLIs[ i ].counters, [ None ] ],
                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                          'randomTime': True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                onosCountersRaw.append( t.result )
            onosCounters = []
            for i in range( len( main.activeNodes ) ):
                try:
                    onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
                except ( ValueError, TypeError ):
                    main.log.error( "Could not parse counters response from ONOS" +
                                    str( main.activeNodes[ i ] + 1 ) )
                    main.log.warn( repr( onosCountersRaw[ i ] ) )
                    onosCounters.append( [] )

            testCounters = {}
            # make a list of all the "TestON-*" counters in ONOS
            # looks like a dict whose keys are the name of the ONOS node and
            # values are a list of the counters. I.E.
            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
            # }
            # NOTE: There is an assumption that all nodes are active
            #       based on the above for loops
            for controller in enumerate( onosCounters ):
                for key, value in controller[ 1 ].iteritems():
                    if 'TestON' in key:
                        node = 'ONOS' + str( controller[ 0 ] + 1 )
                        try:
                            testCounters[ node ].append( { key: value } )
                        except KeyError:
                            testCounters[ node ] = [ { key: value } ]
            # compare the counters on each node
            firstV = testCounters.values()[ 0 ]
            tmp = [ v == firstV for k, v in testCounters.iteritems() ]
            if all( tmp ):
                consistent = main.TRUE
            else:
                consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters:\n" +
                                str( testCounters ) )
            return ( onosCounters, consistent )
        except Exception:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

    def counterCheck( self, counterName, counterValue ):
        """
        Checks that TestON counters are consistent across all nodes and that
        the specified counter is in ONOS with the given value.
        """
        try:
            correctResults = main.TRUE
            # Get onos counters results and consistentCheck
            onosCounters, consistent = self.consistentCheck()
            # Check for correct values
            for i in range( len( main.activeNodes ) ):
                current = onosCounters[ i ]
                onosValue = None
                try:
                    onosValue = current.get( counterName )
                except AttributeError:
                    node = str( main.activeNodes[ i ] + 1 )
                    main.log.exception( "ONOS" + node + " counters result " +
                                        "is not as expected" )
                    correctResults = main.FALSE
                if onosValue == counterValue:
                    main.log.info( counterName + " counter value is correct" )
                else:
                    main.log.error( counterName +
                                    " counter value is incorrect," +
                                    " expected value: " + str( counterValue ) +
                                    " current value: " + str( onosValue ) )
                    correctResults = main.FALSE
            return consistent and correctResults
        except Exception:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

    def consistentLeaderboards( self, nodes ):
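        """
        Check that all given nodes agree on the leaderboard for the
        leadership election topic. Retries up to five times in case an
        election is still in progress.
        Returns the tuple ( result, leaderList ).
        """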
        TOPIC = 'org.onosproject.election'
        # FIXME: use threads
        # FIXME: should we retry outside the function?
        for n in range( 5 ):  # Retry in case election is still happening
            leaderList = []
            # Get all leaderboards
            for cli in nodes:
                leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
            # Compare leaderboards
            result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                     leaderList[ 0 ] is not None
            main.log.debug( leaderList )
            main.log.warn( result )
            if result:
                return ( result, leaderList )
            time.sleep( 5 )  # TODO: parameterize
        main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
        return ( result, leaderList )

    def nodesCheck( self, nodes ):
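        """
        Check that every given node reports all cluster members as READY.
        Returns True if the set of READY IPs on each node matches the
        expected node IPs, otherwise False.
        """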
        nodesOutput = []
        results = True
        threads = []
        for i in nodes:
            t = main.Thread( target=main.CLIs[ i ].nodes,
                             name="nodes-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        ips = sorted( [ main.nodes[ node ].ip_address for node in nodes ] )
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = False
                for node in current:
                    if node[ 'state' ] == 'READY':
                        activeIps.append( node[ 'ip' ] )
                activeIps.sort()
                if ips == activeIps:
                    currentResult = True
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = False
            results = results and currentResult
        return results

    def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
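        """
        Log a wiki-formatted iframe pointing at the Jenkins plot for this
        test. testName is used as the Jenkins job name.
        """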
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = testName
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

    def initialSetUp( self, serviceClean=False ):
        """
        Remaining steps of the initial setup: start packet capture,
        optionally clean up ONOS service changes, check node status, and
        activate apps and configurations from the params file.
        """

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
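        """
        Check that every active node reports the expected completed,
        in progress, and pending totals for the given work queue.
        Returns True only if all three counts match on every node.
        """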
        # Completed
        threads = []
        completedValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].workQueueTotalCompleted,
                             name="WorkQueueCompleted-" + str( i ),
                             args=[ workQueueName ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            completedValues.append( int( t.result ) )
        # Check the results
        completedResults = [ x == completed for x in completedValues ]
        completedResult = all( completedResults )
        if not completedResult:
            main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
                workQueueName, completed, completedValues ) )

        # In Progress
        threads = []
        inProgressValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].workQueueTotalInProgress,
                             name="WorkQueueInProgress-" + str( i ),
                             args=[ workQueueName ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            inProgressValues.append( int( t.result ) )
        # Check the results
        inProgressResults = [ x == inProgress for x in inProgressValues ]
        inProgressResult = all( inProgressResults )
        if not inProgressResult:
            main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
                workQueueName, inProgress, inProgressValues ) )

        # Pending
        threads = []
        pendingValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].workQueueTotalPending,
                             name="WorkQueuePending-" + str( i ),
                             args=[ workQueueName ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            pendingValues.append( int( t.result ) )
        # Check the results
        pendingResults = [ x == pending for x in pendingValues ]
        pendingResult = all( pendingResults )
        if not pendingResult:
            main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
                workQueueName, pending, pendingValues ) )
        return completedResult and inProgressResult and pendingResult

    def assignDevices( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.ONOSbench.maxNodes ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def assignIntents( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            main.HAlabels
        except ( NameError, AttributeError ):
            main.log.error( "main.HAlabels not defined, setting to []" )
            main.HAlabels = []
        try:
            main.HAdata
        except ( NameError, AttributeError ):
            main.log.error( "main.HAdata not defined, setting to []" )
            main.HAdata = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %.2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[ 0 ]
        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[ nodeNum ]
                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[ 0 ]
                hosts = main.CLIs[ node ].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[ i ].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[ i ].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[ i ].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in main.HAlabels:
                main.HAlabels.append( curTitle )
                main.HAdata.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def pingAcrossHostIntent( self, main, multiIntentCheck, activateNode ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents() if multiIntentCheck else main.ONOScli1.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not multiIntentCheck:
                break
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[ i ]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="Intent partitions are in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents changes" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        main.log.debug( main.CLIs[ main.activeNodes[ 0 ] ].flows( jsonFormat=False )
                        if activateNode else onosCli.flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def readingState( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            from tests.dependencies.topology import Topology
        except ImportError:
            main.log.error( "Topology not found, exiting the test" )
            main.exit()
        try:
            main.topoRelated
        except ( NameError, AttributeError ):
            main.topoRelated = Topology()
        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...         ...         ...
            # ...         ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[ -1 ] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[ i ] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep( 30 )
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[ i ] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[ i ] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # NOTE: sourceN/targetN ping pairs are defined in the params file
        for i in range( 1, 11 ):
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source' + str( i ) ],
                target=main.params[ 'PING' ][ 'target' + str( i ) ],
                pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        devices = main.topoRelated.getAllDevices( main.activeNodes, False )
        hosts = main.topoRelated.getAllHosts( main.activeNodes, False, inJson=True )
        ports = main.topoRelated.getAllPorts( main.activeNodes, False )
        links = main.topoRelated.getAllLinks( main.activeNodes, False )
        clusters = main.topoRelated.getAllClusters( main.activeNodes, False )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " are inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

1580 main.step( "Each host has an IP address" )
1581 ipResult = main.TRUE
1582 for controller in range( 0, len( hosts ) ):
1583 controllerStr = str( main.activeNodes[ controller ] + 1 )
1584 if hosts[ controller ]:
1585 for host in hosts[ controller ]:
1586 if not host.get( 'ipAddresses', [] ):
1587                        main.log.error( "Error with host IPs on controller " +
1588 controllerStr + ": " + str( host ) )
1589 ipResult = main.FALSE
1590 utilities.assert_equals(
1591 expect=main.TRUE,
1592 actual=ipResult,
1593            onpass="The IPs of the hosts aren't empty",
1594            onfail="The IP of at least one host is missing" )
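        # NOTE: each entry in hosts[ controller ] is assumed to be a dict parsed
        # from the ONOS hosts JSON, e.g. { "id": "00:00:00:00:00:01/-1",
        # "ipAddresses": [ "10.0.0.1" ], ... }; an empty "ipAddresses" list is
        # exactly what the check above flags. The id shown is illustrative.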
1595
1596 # Strongly connected clusters of devices
1597 main.step( "Cluster view is consistent across ONOS nodes" )
1598 consistentClustersResult = main.TRUE
1599 for controller in range( len( clusters ) ):
1600 controllerStr = str( main.activeNodes[ controller ] + 1 )
1601 if "Error" not in clusters[ controller ]:
1602 if clusters[ controller ] == clusters[ 0 ]:
1603 continue
1604 else: # clusters not consistent
1605 main.log.error( "clusters from ONOS" + controllerStr +
1606                               " are inconsistent with ONOS1" )
1607 consistentClustersResult = main.FALSE
1608
1609 else:
1610 main.log.error( "Error in getting dataplane clusters " +
1611 "from ONOS" + controllerStr )
1612 consistentClustersResult = main.FALSE
1613 main.log.warn( "ONOS" + controllerStr +
1614 " clusters response: " +
1615 repr( clusters[ controller ] ) )
1616 utilities.assert_equals(
1617 expect=main.TRUE,
1618 actual=consistentClustersResult,
1619 onpass="Clusters view is consistent across all ONOS nodes",
1620 onfail="ONOS nodes have different views of clusters" )
1621 if not consistentClustersResult:
1622 main.log.debug( clusters )
1623
1624 # there should always only be one cluster
1625 main.step( "Cluster view correct across ONOS nodes" )
1626 try:
1627 numClusters = len( json.loads( clusters[ 0 ] ) )
1628 except ( ValueError, TypeError ):
1629 main.log.exception( "Error parsing clusters[0]: " +
1630 repr( clusters[ 0 ] ) )
1631 numClusters = "ERROR"
1632 utilities.assert_equals(
1633 expect=1,
1634 actual=numClusters,
1635 onpass="ONOS shows 1 SCC",
1636 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1637
1638 main.step( "Comparing ONOS topology to MN" )
1639 devicesResults = main.TRUE
1640 linksResults = main.TRUE
1641 hostsResults = main.TRUE
1642 mnSwitches = main.Mininet1.getSwitches()
1643 mnLinks = main.Mininet1.getLinks()
1644 mnHosts = main.Mininet1.getHosts()
1645        for controller in range( len( main.activeNodes ) ):
1646 controllerStr = str( main.activeNodes[ controller ] + 1 )
1647 currentDevicesResult = main.topoRelated.compareDevicePort(
1648 main.Mininet1, controller,
1649 mnSwitches, devices, ports )
1650 utilities.assert_equals( expect=main.TRUE,
1651 actual=currentDevicesResult,
1652 onpass="ONOS" + controllerStr +
1653 " Switches view is correct",
1654 onfail="ONOS" + controllerStr +
1655 " Switches view is incorrect" )
1656
1657 currentLinksResult = main.topoRelated.compareBase( links, controller,
1658 main.Mininet1.compareLinks,
1659 [ mnSwitches, mnLinks ] )
1660 utilities.assert_equals( expect=main.TRUE,
1661 actual=currentLinksResult,
1662 onpass="ONOS" + controllerStr +
1663 " links view is correct",
1664 onfail="ONOS" + controllerStr +
1665 " links view is incorrect" )
1666
1667 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1668 currentHostsResult = main.Mininet1.compareHosts(
1669 mnHosts,
1670 hosts[ controller ] )
1671 else:
1672 currentHostsResult = main.FALSE
1673 utilities.assert_equals( expect=main.TRUE,
1674 actual=currentHostsResult,
1675 onpass="ONOS" + controllerStr +
1676 " hosts exist in Mininet",
1677 onfail="ONOS" + controllerStr +
1678 " hosts don't match Mininet" )
1679
1680 devicesResults = devicesResults and currentDevicesResult
1681 linksResults = linksResults and currentLinksResult
1682 hostsResults = hostsResults and currentHostsResult
1683
1684 main.step( "Device information is correct" )
1685 utilities.assert_equals(
1686 expect=main.TRUE,
1687 actual=devicesResults,
1688 onpass="Device information is correct",
1689 onfail="Device information is incorrect" )
1690
1691 main.step( "Links are correct" )
1692 utilities.assert_equals(
1693 expect=main.TRUE,
1694 actual=linksResults,
1695            onpass="Links are correct",
1696 onfail="Links are incorrect" )
1697
1698 main.step( "Hosts are correct" )
1699 utilities.assert_equals(
1700 expect=main.TRUE,
1701 actual=hostsResults,
1702 onpass="Hosts are correct",
1703 onfail="Hosts are incorrect" )
1704
1705 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001706 """
1707 Check for basic functionality with distributed primitives
1708 """
Jon Halle0f0b342017-04-18 11:43:47 -07001709 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001710 try:
1711 # Make sure variables are defined/set
1712 assert main.numCtrls, "main.numCtrls not defined"
1713 assert utilities.assert_equals, "utilities.assert_equals not defined"
1714 assert main.CLIs, "main.CLIs not defined"
1715 assert main.nodes, "main.nodes not defined"
1716 assert main.pCounterName, "main.pCounterName not defined"
1717 assert main.onosSetName, "main.onosSetName not defined"
1718 # NOTE: assert fails if value is 0/None/Empty/False
1719 try:
1720 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001721 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001722 main.log.error( "main.pCounterValue not defined, setting to 0" )
1723 main.pCounterValue = 0
1724 try:
1725 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001726 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001727 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001728 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001729 # Variables for the distributed primitives tests. These are local only
1730 addValue = "a"
1731 addAllValue = "a b c d e f"
1732 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001733 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001734 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001735 workQueueName = "TestON-Queue"
1736 workQueueCompleted = 0
1737 workQueueInProgress = 0
1738 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001739
1740 description = "Check for basic functionality with distributed " +\
1741 "primitives"
1742 main.case( description )
1743 main.caseExplanation = "Test the methods of the distributed " +\
1744                                  "primitives (counters and sets) through the CLI"
1745 # DISTRIBUTED ATOMIC COUNTERS
1746 # Partitioned counters
1747 main.step( "Increment then get a default counter on each node" )
1748 pCounters = []
1749 threads = []
1750 addedPValues = []
1751 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001752 t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001753 name="counterAddAndGet-" + str( i ),
1754 args=[ main.pCounterName ] )
1755 main.pCounterValue += 1
1756 addedPValues.append( main.pCounterValue )
1757 threads.append( t )
1758 t.start()
1759
1760 for t in threads:
1761 t.join()
1762 pCounters.append( t.result )
1763 # Check that counter incremented numController times
1764 pCounterResults = True
1765 for i in addedPValues:
1766 tmpResult = i in pCounters
1767 pCounterResults = pCounterResults and tmpResult
1768 if not tmpResult:
1769 main.log.error( str( i ) + " is not in partitioned "
1770 "counter incremented results" )
1771 utilities.assert_equals( expect=True,
1772 actual=pCounterResults,
1773 onpass="Default counter incremented",
1774 onfail="Error incrementing default" +
1775 " counter" )
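            # NOTE: a worked example of the membership check above: with three
            # active nodes and the counter at 0, the three concurrent addAndGet
            # calls must collectively return { 1, 2, 3 } ( in some order ), so
            # every locally predicted value in addedPValues should appear in
            # pCounters.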
1776
1777 main.step( "Get then Increment a default counter on each node" )
1778 pCounters = []
1779 threads = []
1780 addedPValues = []
1781 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001782 t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001783 name="counterGetAndAdd-" + str( i ),
1784 args=[ main.pCounterName ] )
1785 addedPValues.append( main.pCounterValue )
1786 main.pCounterValue += 1
1787 threads.append( t )
1788 t.start()
1789
1790 for t in threads:
1791 t.join()
1792 pCounters.append( t.result )
1793 # Check that counter incremented numController times
1794 pCounterResults = True
1795 for i in addedPValues:
1796 tmpResult = i in pCounters
1797 pCounterResults = pCounterResults and tmpResult
1798 if not tmpResult:
1799 main.log.error( str( i ) + " is not in partitioned "
1800 "counter incremented results" )
1801 utilities.assert_equals( expect=True,
1802 actual=pCounterResults,
1803 onpass="Default counter incremented",
1804 onfail="Error incrementing default" +
1805 " counter" )
1806
1807 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001808 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001809 utilities.assert_equals( expect=main.TRUE,
1810 actual=incrementCheck,
1811 onpass="Added counters are correct",
1812 onfail="Added counters are incorrect" )
1813
1814 main.step( "Add -8 to then get a default counter on each node" )
1815 pCounters = []
1816 threads = []
1817 addedPValues = []
1818 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001819 t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001820 name="counterIncrement-" + str( i ),
1821 args=[ main.pCounterName ],
1822 kwargs={ "delta": -8 } )
1823 main.pCounterValue += -8
1824 addedPValues.append( main.pCounterValue )
1825 threads.append( t )
1826 t.start()
1827
1828 for t in threads:
1829 t.join()
1830 pCounters.append( t.result )
1831 # Check that counter incremented numController times
1832 pCounterResults = True
1833 for i in addedPValues:
1834 tmpResult = i in pCounters
1835 pCounterResults = pCounterResults and tmpResult
1836 if not tmpResult:
1837 main.log.error( str( i ) + " is not in partitioned "
1838 "counter incremented results" )
1839 utilities.assert_equals( expect=True,
1840 actual=pCounterResults,
1841                                     onpass="Default counter updated",
1842                                     onfail="Error updating default" +
1843                                     " counter" )
1844
1845 main.step( "Add 5 to then get a default counter on each node" )
1846 pCounters = []
1847 threads = []
1848 addedPValues = []
1849 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001850 t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001851 name="counterIncrement-" + str( i ),
1852 args=[ main.pCounterName ],
1853 kwargs={ "delta": 5 } )
1854 main.pCounterValue += 5
1855 addedPValues.append( main.pCounterValue )
1856 threads.append( t )
1857 t.start()
1858
1859 for t in threads:
1860 t.join()
1861 pCounters.append( t.result )
1862 # Check that counter incremented numController times
1863 pCounterResults = True
1864 for i in addedPValues:
1865 tmpResult = i in pCounters
1866 pCounterResults = pCounterResults and tmpResult
1867 if not tmpResult:
1868 main.log.error( str( i ) + " is not in partitioned "
1869 "counter incremented results" )
1870 utilities.assert_equals( expect=True,
1871 actual=pCounterResults,
1872 onpass="Default counter incremented",
1873 onfail="Error incrementing default" +
1874 " counter" )
1875
1876 main.step( "Get then add 5 to a default counter on each node" )
1877 pCounters = []
1878 threads = []
1879 addedPValues = []
1880 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001881 t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001882 name="counterIncrement-" + str( i ),
1883 args=[ main.pCounterName ],
1884 kwargs={ "delta": 5 } )
1885 addedPValues.append( main.pCounterValue )
1886 main.pCounterValue += 5
1887 threads.append( t )
1888 t.start()
1889
1890 for t in threads:
1891 t.join()
1892 pCounters.append( t.result )
1893 # Check that counter incremented numController times
1894 pCounterResults = True
1895 for i in addedPValues:
1896 tmpResult = i in pCounters
1897 pCounterResults = pCounterResults and tmpResult
1898 if not tmpResult:
1899 main.log.error( str( i ) + " is not in partitioned "
1900 "counter incremented results" )
1901 utilities.assert_equals( expect=True,
1902 actual=pCounterResults,
1903 onpass="Default counter incremented",
1904 onfail="Error incrementing default" +
1905 " counter" )
1906
1907 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001908 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001909 utilities.assert_equals( expect=main.TRUE,
1910 actual=incrementCheck,
1911 onpass="Added counters are correct",
1912 onfail="Added counters are incorrect" )
1913
1914 # DISTRIBUTED SETS
1915 main.step( "Distributed Set get" )
1916 size = len( main.onosSet )
1917 getResponses = []
1918 threads = []
1919 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001920 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001921 name="setTestGet-" + str( i ),
1922 args=[ main.onosSetName ] )
1923 threads.append( t )
1924 t.start()
1925 for t in threads:
1926 t.join()
1927 getResponses.append( t.result )
1928
1929 getResults = main.TRUE
1930 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001931 node = str( main.activeNodes[ i ] + 1 )
1932 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001933 current = set( getResponses[ i ] )
1934 if len( current ) == len( getResponses[ i ] ):
1935 # no repeats
1936 if main.onosSet != current:
1937 main.log.error( "ONOS" + node +
1938 " has incorrect view" +
1939 " of set " + main.onosSetName + ":\n" +
1940 str( getResponses[ i ] ) )
1941 main.log.debug( "Expected: " + str( main.onosSet ) )
1942 main.log.debug( "Actual: " + str( current ) )
1943 getResults = main.FALSE
1944 else:
1945 # error, set is not a set
1946 main.log.error( "ONOS" + node +
1947 " has repeat elements in" +
1948 " set " + main.onosSetName + ":\n" +
1949 str( getResponses[ i ] ) )
1950 getResults = main.FALSE
1951 elif getResponses[ i ] == main.ERROR:
1952 getResults = main.FALSE
1953 utilities.assert_equals( expect=main.TRUE,
1954 actual=getResults,
1955 onpass="Set elements are correct",
1956 onfail="Set elements are incorrect" )
1957
1958 main.step( "Distributed Set size" )
1959 sizeResponses = []
1960 threads = []
1961 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001962 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001963 name="setTestSize-" + str( i ),
1964 args=[ main.onosSetName ] )
1965 threads.append( t )
1966 t.start()
1967 for t in threads:
1968 t.join()
1969 sizeResponses.append( t.result )
1970
1971 sizeResults = main.TRUE
1972 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001973 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001974 if size != sizeResponses[ i ]:
1975 sizeResults = main.FALSE
1976 main.log.error( "ONOS" + node +
1977 " expected a size of " + str( size ) +
1978 " for set " + main.onosSetName +
1979 " but got " + str( sizeResponses[ i ] ) )
1980 utilities.assert_equals( expect=main.TRUE,
1981 actual=sizeResults,
1982 onpass="Set sizes are correct",
1983 onfail="Set sizes are incorrect" )
1984
1985 main.step( "Distributed Set add()" )
1986 main.onosSet.add( addValue )
1987 addResponses = []
1988 threads = []
1989 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001990 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001991 name="setTestAdd-" + str( i ),
1992 args=[ main.onosSetName, addValue ] )
1993 threads.append( t )
1994 t.start()
1995 for t in threads:
1996 t.join()
1997 addResponses.append( t.result )
1998
1999 # main.TRUE = successfully changed the set
2000 # main.FALSE = action resulted in no change in set
2001 # main.ERROR - Some error in executing the function
2002 addResults = main.TRUE
2003 for i in range( len( main.activeNodes ) ):
2004 if addResponses[ i ] == main.TRUE:
2005 # All is well
2006 pass
2007 elif addResponses[ i ] == main.FALSE:
2008 # Already in set, probably fine
2009 pass
2010 elif addResponses[ i ] == main.ERROR:
2011 # Error in execution
2012 addResults = main.FALSE
2013 else:
2014 # unexpected result
2015 addResults = main.FALSE
2016 if addResults != main.TRUE:
2017 main.log.error( "Error executing set add" )
2018
2019 # Check if set is still correct
2020 size = len( main.onosSet )
2021 getResponses = []
2022 threads = []
2023 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002024 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002025 name="setTestGet-" + str( i ),
2026 args=[ main.onosSetName ] )
2027 threads.append( t )
2028 t.start()
2029 for t in threads:
2030 t.join()
2031 getResponses.append( t.result )
2032 getResults = main.TRUE
2033 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002034 node = str( main.activeNodes[ i ] + 1 )
2035 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002036 current = set( getResponses[ i ] )
2037 if len( current ) == len( getResponses[ i ] ):
2038 # no repeats
2039 if main.onosSet != current:
2040 main.log.error( "ONOS" + node + " has incorrect view" +
2041 " of set " + main.onosSetName + ":\n" +
2042 str( getResponses[ i ] ) )
2043 main.log.debug( "Expected: " + str( main.onosSet ) )
2044 main.log.debug( "Actual: " + str( current ) )
2045 getResults = main.FALSE
2046 else:
2047 # error, set is not a set
2048 main.log.error( "ONOS" + node + " has repeat elements in" +
2049 " set " + main.onosSetName + ":\n" +
2050 str( getResponses[ i ] ) )
2051 getResults = main.FALSE
2052 elif getResponses[ i ] == main.ERROR:
2053 getResults = main.FALSE
2054 sizeResponses = []
2055 threads = []
2056 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002057 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002058 name="setTestSize-" + str( i ),
2059 args=[ main.onosSetName ] )
2060 threads.append( t )
2061 t.start()
2062 for t in threads:
2063 t.join()
2064 sizeResponses.append( t.result )
2065 sizeResults = main.TRUE
2066 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002067 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002068 if size != sizeResponses[ i ]:
2069 sizeResults = main.FALSE
2070 main.log.error( "ONOS" + node +
2071 " expected a size of " + str( size ) +
2072 " for set " + main.onosSetName +
2073 " but got " + str( sizeResponses[ i ] ) )
2074 addResults = addResults and getResults and sizeResults
2075 utilities.assert_equals( expect=main.TRUE,
2076 actual=addResults,
2077 onpass="Set add correct",
2078 onfail="Set add was incorrect" )
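            # NOTE: the classify-responses / re-read / re-size pattern above
            # repeats for every set operation below; a helper along these lines
            # ( a sketch only, not wired into the test ) would collapse each
            # response check to one call:
            #   def checkSetResponses( responses, opName ):
            #       result = main.TRUE
            #       for r in responses:
            #           if r not in ( main.TRUE, main.FALSE ):
            #               # main.ERROR or an unexpected value
            #               result = main.FALSE
            #       if result != main.TRUE:
            #           main.log.error( "Error executing set " + opName )
            #       return result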
2079
2080 main.step( "Distributed Set addAll()" )
2081 main.onosSet.update( addAllValue.split() )
2082 addResponses = []
2083 threads = []
2084 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002085 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002086 name="setTestAddAll-" + str( i ),
2087 args=[ main.onosSetName, addAllValue ] )
2088 threads.append( t )
2089 t.start()
2090 for t in threads:
2091 t.join()
2092 addResponses.append( t.result )
2093
2094 # main.TRUE = successfully changed the set
2095 # main.FALSE = action resulted in no change in set
2096 # main.ERROR - Some error in executing the function
2097 addAllResults = main.TRUE
2098 for i in range( len( main.activeNodes ) ):
2099 if addResponses[ i ] == main.TRUE:
2100 # All is well
2101 pass
2102 elif addResponses[ i ] == main.FALSE:
2103 # Already in set, probably fine
2104 pass
2105 elif addResponses[ i ] == main.ERROR:
2106 # Error in execution
2107 addAllResults = main.FALSE
2108 else:
2109 # unexpected result
2110 addAllResults = main.FALSE
2111 if addAllResults != main.TRUE:
2112 main.log.error( "Error executing set addAll" )
2113
2114 # Check if set is still correct
2115 size = len( main.onosSet )
2116 getResponses = []
2117 threads = []
2118 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002119 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002120 name="setTestGet-" + str( i ),
2121 args=[ main.onosSetName ] )
2122 threads.append( t )
2123 t.start()
2124 for t in threads:
2125 t.join()
2126 getResponses.append( t.result )
2127 getResults = main.TRUE
2128 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002129 node = str( main.activeNodes[ i ] + 1 )
2130 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002131 current = set( getResponses[ i ] )
2132 if len( current ) == len( getResponses[ i ] ):
2133 # no repeats
2134 if main.onosSet != current:
2135 main.log.error( "ONOS" + node +
2136 " has incorrect view" +
2137 " of set " + main.onosSetName + ":\n" +
2138 str( getResponses[ i ] ) )
2139 main.log.debug( "Expected: " + str( main.onosSet ) )
2140 main.log.debug( "Actual: " + str( current ) )
2141 getResults = main.FALSE
2142 else:
2143 # error, set is not a set
2144 main.log.error( "ONOS" + node +
2145 " has repeat elements in" +
2146 " set " + main.onosSetName + ":\n" +
2147 str( getResponses[ i ] ) )
2148 getResults = main.FALSE
2149 elif getResponses[ i ] == main.ERROR:
2150 getResults = main.FALSE
2151 sizeResponses = []
2152 threads = []
2153 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002154 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002155 name="setTestSize-" + str( i ),
2156 args=[ main.onosSetName ] )
2157 threads.append( t )
2158 t.start()
2159 for t in threads:
2160 t.join()
2161 sizeResponses.append( t.result )
2162 sizeResults = main.TRUE
2163 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002164 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002165 if size != sizeResponses[ i ]:
2166 sizeResults = main.FALSE
2167 main.log.error( "ONOS" + node +
2168 " expected a size of " + str( size ) +
2169 " for set " + main.onosSetName +
2170 " but got " + str( sizeResponses[ i ] ) )
2171 addAllResults = addAllResults and getResults and sizeResults
2172 utilities.assert_equals( expect=main.TRUE,
2173 actual=addAllResults,
2174 onpass="Set addAll correct",
2175 onfail="Set addAll was incorrect" )
2176
2177 main.step( "Distributed Set contains()" )
2178 containsResponses = []
2179 threads = []
2180 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002181 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002182 name="setContains-" + str( i ),
2183 args=[ main.onosSetName ],
2184 kwargs={ "values": addValue } )
2185 threads.append( t )
2186 t.start()
2187 for t in threads:
2188 t.join()
2189                # NOTE: t.result is a tuple here; the boolean at index 1 is the contains result
2190 containsResponses.append( t.result )
2191
2192 containsResults = main.TRUE
2193 for i in range( len( main.activeNodes ) ):
2194 if containsResponses[ i ] == main.ERROR:
2195 containsResults = main.FALSE
2196 else:
2197 containsResults = containsResults and\
2198 containsResponses[ i ][ 1 ]
2199 utilities.assert_equals( expect=main.TRUE,
2200 actual=containsResults,
2201 onpass="Set contains is functional",
2202 onfail="Set contains failed" )
2203
2204 main.step( "Distributed Set containsAll()" )
2205 containsAllResponses = []
2206 threads = []
2207 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002208 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002209 name="setContainsAll-" + str( i ),
2210 args=[ main.onosSetName ],
2211 kwargs={ "values": addAllValue } )
2212 threads.append( t )
2213 t.start()
2214 for t in threads:
2215 t.join()
2216                # NOTE: t.result is a tuple here; the boolean at index 1 is the containsAll result
2217 containsAllResponses.append( t.result )
2218
2219 containsAllResults = main.TRUE
2220 for i in range( len( main.activeNodes ) ):
2221                if containsAllResponses[ i ] == main.ERROR:
2222                    containsAllResults = main.FALSE
2223                else:
2224                    containsAllResults = containsAllResults and\
2225                                         containsAllResponses[ i ][ 1 ]
2226 utilities.assert_equals( expect=main.TRUE,
2227 actual=containsAllResults,
2228 onpass="Set containsAll is functional",
2229 onfail="Set containsAll failed" )
2230
2231 main.step( "Distributed Set remove()" )
2232 main.onosSet.remove( addValue )
2233 removeResponses = []
2234 threads = []
2235 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002236 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002237 name="setTestRemove-" + str( i ),
2238 args=[ main.onosSetName, addValue ] )
2239 threads.append( t )
2240 t.start()
2241 for t in threads:
2242 t.join()
2243 removeResponses.append( t.result )
2244
2245 # main.TRUE = successfully changed the set
2246 # main.FALSE = action resulted in no change in set
2247 # main.ERROR - Some error in executing the function
2248 removeResults = main.TRUE
2249 for i in range( len( main.activeNodes ) ):
2250 if removeResponses[ i ] == main.TRUE:
2251 # All is well
2252 pass
2253 elif removeResponses[ i ] == main.FALSE:
2254 # not in set, probably fine
2255 pass
2256 elif removeResponses[ i ] == main.ERROR:
2257 # Error in execution
2258 removeResults = main.FALSE
2259 else:
2260 # unexpected result
2261 removeResults = main.FALSE
2262 if removeResults != main.TRUE:
2263 main.log.error( "Error executing set remove" )
2264
2265 # Check if set is still correct
2266 size = len( main.onosSet )
2267 getResponses = []
2268 threads = []
2269 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002270 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002271 name="setTestGet-" + str( i ),
2272 args=[ main.onosSetName ] )
2273 threads.append( t )
2274 t.start()
2275 for t in threads:
2276 t.join()
2277 getResponses.append( t.result )
2278 getResults = main.TRUE
2279 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002280 node = str( main.activeNodes[ i ] + 1 )
2281 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002282 current = set( getResponses[ i ] )
2283 if len( current ) == len( getResponses[ i ] ):
2284 # no repeats
2285 if main.onosSet != current:
2286 main.log.error( "ONOS" + node +
2287 " has incorrect view" +
2288 " of set " + main.onosSetName + ":\n" +
2289 str( getResponses[ i ] ) )
2290 main.log.debug( "Expected: " + str( main.onosSet ) )
2291 main.log.debug( "Actual: " + str( current ) )
2292 getResults = main.FALSE
2293 else:
2294 # error, set is not a set
2295 main.log.error( "ONOS" + node +
2296 " has repeat elements in" +
2297 " set " + main.onosSetName + ":\n" +
2298 str( getResponses[ i ] ) )
2299 getResults = main.FALSE
2300 elif getResponses[ i ] == main.ERROR:
2301 getResults = main.FALSE
2302 sizeResponses = []
2303 threads = []
2304 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002305 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002306 name="setTestSize-" + str( i ),
2307 args=[ main.onosSetName ] )
2308 threads.append( t )
2309 t.start()
2310 for t in threads:
2311 t.join()
2312 sizeResponses.append( t.result )
2313 sizeResults = main.TRUE
2314 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002315 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002316 if size != sizeResponses[ i ]:
2317 sizeResults = main.FALSE
2318 main.log.error( "ONOS" + node +
2319 " expected a size of " + str( size ) +
2320 " for set " + main.onosSetName +
2321 " but got " + str( sizeResponses[ i ] ) )
2322 removeResults = removeResults and getResults and sizeResults
2323 utilities.assert_equals( expect=main.TRUE,
2324 actual=removeResults,
2325 onpass="Set remove correct",
2326 onfail="Set remove was incorrect" )
2327
2328 main.step( "Distributed Set removeAll()" )
2329 main.onosSet.difference_update( addAllValue.split() )
2330 removeAllResponses = []
2331 threads = []
2332 try:
2333 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002334 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002335 name="setTestRemoveAll-" + str( i ),
2336 args=[ main.onosSetName, addAllValue ] )
2337 threads.append( t )
2338 t.start()
2339 for t in threads:
2340 t.join()
2341 removeAllResponses.append( t.result )
Jon Hallf37d44d2017-05-24 10:37:30 -07002342 except Exception as e:
2343 main.log.exception( e )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002344
2345 # main.TRUE = successfully changed the set
2346 # main.FALSE = action resulted in no change in set
2347 # main.ERROR - Some error in executing the function
2348 removeAllResults = main.TRUE
2349 for i in range( len( main.activeNodes ) ):
2350 if removeAllResponses[ i ] == main.TRUE:
2351 # All is well
2352 pass
2353 elif removeAllResponses[ i ] == main.FALSE:
2354 # not in set, probably fine
2355 pass
2356 elif removeAllResponses[ i ] == main.ERROR:
2357 # Error in execution
2358 removeAllResults = main.FALSE
2359 else:
2360 # unexpected result
2361 removeAllResults = main.FALSE
2362 if removeAllResults != main.TRUE:
2363 main.log.error( "Error executing set removeAll" )
2364
2365 # Check if set is still correct
2366 size = len( main.onosSet )
2367 getResponses = []
2368 threads = []
2369 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002370 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002371 name="setTestGet-" + str( i ),
2372 args=[ main.onosSetName ] )
2373 threads.append( t )
2374 t.start()
2375 for t in threads:
2376 t.join()
2377 getResponses.append( t.result )
2378 getResults = main.TRUE
2379 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002380 node = str( main.activeNodes[ i ] + 1 )
2381 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002382 current = set( getResponses[ i ] )
2383 if len( current ) == len( getResponses[ i ] ):
2384 # no repeats
2385 if main.onosSet != current:
2386 main.log.error( "ONOS" + node +
2387 " has incorrect view" +
2388 " of set " + main.onosSetName + ":\n" +
2389 str( getResponses[ i ] ) )
2390 main.log.debug( "Expected: " + str( main.onosSet ) )
2391 main.log.debug( "Actual: " + str( current ) )
2392 getResults = main.FALSE
2393 else:
2394 # error, set is not a set
2395 main.log.error( "ONOS" + node +
2396 " has repeat elements in" +
2397 " set " + main.onosSetName + ":\n" +
2398 str( getResponses[ i ] ) )
2399 getResults = main.FALSE
2400 elif getResponses[ i ] == main.ERROR:
2401 getResults = main.FALSE
2402 sizeResponses = []
2403 threads = []
2404 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002405 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002406 name="setTestSize-" + str( i ),
2407 args=[ main.onosSetName ] )
2408 threads.append( t )
2409 t.start()
2410 for t in threads:
2411 t.join()
2412 sizeResponses.append( t.result )
2413 sizeResults = main.TRUE
2414 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002415 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002416 if size != sizeResponses[ i ]:
2417 sizeResults = main.FALSE
2418 main.log.error( "ONOS" + node +
2419 " expected a size of " + str( size ) +
2420 " for set " + main.onosSetName +
2421 " but got " + str( sizeResponses[ i ] ) )
2422 removeAllResults = removeAllResults and getResults and sizeResults
2423 utilities.assert_equals( expect=main.TRUE,
2424 actual=removeAllResults,
2425 onpass="Set removeAll correct",
2426 onfail="Set removeAll was incorrect" )
2427
2428 main.step( "Distributed Set addAll()" )
2429 main.onosSet.update( addAllValue.split() )
2430 addResponses = []
2431 threads = []
2432 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002433 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002434 name="setTestAddAll-" + str( i ),
2435 args=[ main.onosSetName, addAllValue ] )
2436 threads.append( t )
2437 t.start()
2438 for t in threads:
2439 t.join()
2440 addResponses.append( t.result )
2441
2442 # main.TRUE = successfully changed the set
2443 # main.FALSE = action resulted in no change in set
2444 # main.ERROR - Some error in executing the function
2445 addAllResults = main.TRUE
2446 for i in range( len( main.activeNodes ) ):
2447 if addResponses[ i ] == main.TRUE:
2448 # All is well
2449 pass
2450 elif addResponses[ i ] == main.FALSE:
2451 # Already in set, probably fine
2452 pass
2453 elif addResponses[ i ] == main.ERROR:
2454 # Error in execution
2455 addAllResults = main.FALSE
2456 else:
2457 # unexpected result
2458 addAllResults = main.FALSE
2459 if addAllResults != main.TRUE:
2460 main.log.error( "Error executing set addAll" )
2461
2462 # Check if set is still correct
2463 size = len( main.onosSet )
2464 getResponses = []
2465 threads = []
2466 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002467 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002468 name="setTestGet-" + str( i ),
2469 args=[ main.onosSetName ] )
2470 threads.append( t )
2471 t.start()
2472 for t in threads:
2473 t.join()
2474 getResponses.append( t.result )
2475 getResults = main.TRUE
2476 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002477 node = str( main.activeNodes[ i ] + 1 )
2478 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002479 current = set( getResponses[ i ] )
2480 if len( current ) == len( getResponses[ i ] ):
2481 # no repeats
2482 if main.onosSet != current:
2483 main.log.error( "ONOS" + node +
2484 " has incorrect view" +
2485 " of set " + main.onosSetName + ":\n" +
2486 str( getResponses[ i ] ) )
2487 main.log.debug( "Expected: " + str( main.onosSet ) )
2488 main.log.debug( "Actual: " + str( current ) )
2489 getResults = main.FALSE
2490 else:
2491 # error, set is not a set
2492 main.log.error( "ONOS" + node +
2493 " has repeat elements in" +
2494 " set " + main.onosSetName + ":\n" +
2495 str( getResponses[ i ] ) )
2496 getResults = main.FALSE
2497 elif getResponses[ i ] == main.ERROR:
2498 getResults = main.FALSE
2499 sizeResponses = []
2500 threads = []
2501 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002502 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002503 name="setTestSize-" + str( i ),
2504 args=[ main.onosSetName ] )
2505 threads.append( t )
2506 t.start()
2507 for t in threads:
2508 t.join()
2509 sizeResponses.append( t.result )
2510 sizeResults = main.TRUE
2511 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002512 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002513 if size != sizeResponses[ i ]:
2514 sizeResults = main.FALSE
2515 main.log.error( "ONOS" + node +
2516 " expected a size of " + str( size ) +
2517 " for set " + main.onosSetName +
2518 " but got " + str( sizeResponses[ i ] ) )
2519 addAllResults = addAllResults and getResults and sizeResults
2520 utilities.assert_equals( expect=main.TRUE,
2521 actual=addAllResults,
2522 onpass="Set addAll correct",
2523 onfail="Set addAll was incorrect" )
2524
2525 main.step( "Distributed Set clear()" )
2526 main.onosSet.clear()
2527 clearResponses = []
2528 threads = []
2529 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002530 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002531 name="setTestClear-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002532                                 args=[ main.onosSetName, " " ],  # Value doesn't matter
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002533 kwargs={ "clear": True } )
2534 threads.append( t )
2535 t.start()
2536 for t in threads:
2537 t.join()
2538 clearResponses.append( t.result )
2539
2540 # main.TRUE = successfully changed the set
2541 # main.FALSE = action resulted in no change in set
2542 # main.ERROR - Some error in executing the function
2543 clearResults = main.TRUE
2544 for i in range( len( main.activeNodes ) ):
2545 if clearResponses[ i ] == main.TRUE:
2546 # All is well
2547 pass
2548 elif clearResponses[ i ] == main.FALSE:
2549 # Nothing set, probably fine
2550 pass
2551 elif clearResponses[ i ] == main.ERROR:
2552 # Error in execution
2553 clearResults = main.FALSE
2554 else:
2555 # unexpected result
2556 clearResults = main.FALSE
2557 if clearResults != main.TRUE:
2558 main.log.error( "Error executing set clear" )
2559
2560 # Check if set is still correct
2561 size = len( main.onosSet )
2562 getResponses = []
2563 threads = []
2564 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002565 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002566 name="setTestGet-" + str( i ),
2567 args=[ main.onosSetName ] )
2568 threads.append( t )
2569 t.start()
2570 for t in threads:
2571 t.join()
2572 getResponses.append( t.result )
2573 getResults = main.TRUE
2574 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002575 node = str( main.activeNodes[ i ] + 1 )
2576 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002577 current = set( getResponses[ i ] )
2578 if len( current ) == len( getResponses[ i ] ):
2579 # no repeats
2580 if main.onosSet != current:
2581 main.log.error( "ONOS" + node +
2582 " has incorrect view" +
2583 " of set " + main.onosSetName + ":\n" +
2584 str( getResponses[ i ] ) )
2585 main.log.debug( "Expected: " + str( main.onosSet ) )
2586 main.log.debug( "Actual: " + str( current ) )
2587 getResults = main.FALSE
2588 else:
2589 # error, set is not a set
2590 main.log.error( "ONOS" + node +
2591 " has repeat elements in" +
2592 " set " + main.onosSetName + ":\n" +
2593 str( getResponses[ i ] ) )
2594 getResults = main.FALSE
2595 elif getResponses[ i ] == main.ERROR:
2596 getResults = main.FALSE
2597 sizeResponses = []
2598 threads = []
2599 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002600 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002601 name="setTestSize-" + str( i ),
2602 args=[ main.onosSetName ] )
2603 threads.append( t )
2604 t.start()
2605 for t in threads:
2606 t.join()
2607 sizeResponses.append( t.result )
2608 sizeResults = main.TRUE
2609 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002610 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002611 if size != sizeResponses[ i ]:
2612 sizeResults = main.FALSE
2613 main.log.error( "ONOS" + node +
2614 " expected a size of " + str( size ) +
2615 " for set " + main.onosSetName +
2616 " but got " + str( sizeResponses[ i ] ) )
2617 clearResults = clearResults and getResults and sizeResults
2618 utilities.assert_equals( expect=main.TRUE,
2619 actual=clearResults,
2620 onpass="Set clear correct",
2621 onfail="Set clear was incorrect" )
2622
2623 main.step( "Distributed Set addAll()" )
2624 main.onosSet.update( addAllValue.split() )
2625 addResponses = []
2626 threads = []
2627 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002628 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002629 name="setTestAddAll-" + str( i ),
2630 args=[ main.onosSetName, addAllValue ] )
2631 threads.append( t )
2632 t.start()
2633 for t in threads:
2634 t.join()
2635 addResponses.append( t.result )
2636
2637 # main.TRUE = successfully changed the set
2638 # main.FALSE = action resulted in no change in set
2639 # main.ERROR - Some error in executing the function
2640 addAllResults = main.TRUE
2641 for i in range( len( main.activeNodes ) ):
2642 if addResponses[ i ] == main.TRUE:
2643 # All is well
2644 pass
2645 elif addResponses[ i ] == main.FALSE:
2646 # Already in set, probably fine
2647 pass
2648 elif addResponses[ i ] == main.ERROR:
2649 # Error in execution
2650 addAllResults = main.FALSE
2651 else:
2652 # unexpected result
2653 addAllResults = main.FALSE
2654 if addAllResults != main.TRUE:
2655 main.log.error( "Error executing set addAll" )
2656
2657 # Check if set is still correct
2658 size = len( main.onosSet )
2659 getResponses = []
2660 threads = []
2661 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002662 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002663 name="setTestGet-" + str( i ),
2664 args=[ main.onosSetName ] )
2665 threads.append( t )
2666 t.start()
2667 for t in threads:
2668 t.join()
2669 getResponses.append( t.result )
2670 getResults = main.TRUE
2671 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002672 node = str( main.activeNodes[ i ] + 1 )
2673 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002674 current = set( getResponses[ i ] )
2675 if len( current ) == len( getResponses[ i ] ):
2676 # no repeats
2677 if main.onosSet != current:
2678 main.log.error( "ONOS" + node +
2679 " has incorrect view" +
2680 " of set " + main.onosSetName + ":\n" +
2681 str( getResponses[ i ] ) )
2682 main.log.debug( "Expected: " + str( main.onosSet ) )
2683 main.log.debug( "Actual: " + str( current ) )
2684 getResults = main.FALSE
2685 else:
2686 # error, set is not a set
2687 main.log.error( "ONOS" + node +
2688 " has repeat elements in" +
2689 " set " + main.onosSetName + ":\n" +
2690 str( getResponses[ i ] ) )
2691 getResults = main.FALSE
2692 elif getResponses[ i ] == main.ERROR:
2693 getResults = main.FALSE
2694 sizeResponses = []
2695 threads = []
2696 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002697 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002698 name="setTestSize-" + str( i ),
2699 args=[ main.onosSetName ] )
2700 threads.append( t )
2701 t.start()
2702 for t in threads:
2703 t.join()
2704 sizeResponses.append( t.result )
2705 sizeResults = main.TRUE
2706 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002707 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002708 if size != sizeResponses[ i ]:
2709 sizeResults = main.FALSE
2710 main.log.error( "ONOS" + node +
2711 " expected a size of " + str( size ) +
2712 " for set " + main.onosSetName +
2713 " but got " + str( sizeResponses[ i ] ) )
2714 addAllResults = addAllResults and getResults and sizeResults
2715 utilities.assert_equals( expect=main.TRUE,
2716 actual=addAllResults,
2717 onpass="Set addAll correct",
2718 onfail="Set addAll was incorrect" )
2719
2720 main.step( "Distributed Set retain()" )
2721 main.onosSet.intersection_update( retainValue.split() )
2722 retainResponses = []
2723 threads = []
2724 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002725 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002726 name="setTestRetain-" + str( i ),
2727 args=[ main.onosSetName, retainValue ],
2728 kwargs={ "retain": True } )
2729 threads.append( t )
2730 t.start()
2731 for t in threads:
2732 t.join()
2733 retainResponses.append( t.result )
2734
2735 # main.TRUE = successfully changed the set
2736 # main.FALSE = action resulted in no change in set
2737 # main.ERROR - Some error in executing the function
2738 retainResults = main.TRUE
2739 for i in range( len( main.activeNodes ) ):
2740 if retainResponses[ i ] == main.TRUE:
2741 # All is well
2742 pass
2743 elif retainResponses[ i ] == main.FALSE:
2744 # Already in set, probably fine
2745 pass
2746 elif retainResponses[ i ] == main.ERROR:
2747 # Error in execution
2748 retainResults = main.FALSE
2749 else:
2750 # unexpected result
2751 retainResults = main.FALSE
2752 if retainResults != main.TRUE:
2753 main.log.error( "Error executing set retain" )
2754
2755 # Check if set is still correct
2756 size = len( main.onosSet )
2757 getResponses = []
2758 threads = []
2759 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002760 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002761 name="setTestGet-" + str( i ),
2762 args=[ main.onosSetName ] )
2763 threads.append( t )
2764 t.start()
2765 for t in threads:
2766 t.join()
2767 getResponses.append( t.result )
2768 getResults = main.TRUE
2769 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002770 node = str( main.activeNodes[ i ] + 1 )
2771 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002772 current = set( getResponses[ i ] )
2773 if len( current ) == len( getResponses[ i ] ):
2774 # no repeats
2775 if main.onosSet != current:
2776 main.log.error( "ONOS" + node +
2777 " has incorrect view" +
2778 " of set " + main.onosSetName + ":\n" +
2779 str( getResponses[ i ] ) )
2780 main.log.debug( "Expected: " + str( main.onosSet ) )
2781 main.log.debug( "Actual: " + str( current ) )
2782 getResults = main.FALSE
2783 else:
2784 # error, set is not a set
2785 main.log.error( "ONOS" + node +
2786 " has repeat elements in" +
2787 " set " + main.onosSetName + ":\n" +
2788 str( getResponses[ i ] ) )
2789 getResults = main.FALSE
2790 elif getResponses[ i ] == main.ERROR:
2791 getResults = main.FALSE
2792 sizeResponses = []
2793 threads = []
2794 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002795 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002796 name="setTestSize-" + str( i ),
2797 args=[ main.onosSetName ] )
2798 threads.append( t )
2799 t.start()
2800 for t in threads:
2801 t.join()
2802 sizeResponses.append( t.result )
2803 sizeResults = main.TRUE
2804 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002805 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002806 if size != sizeResponses[ i ]:
2807 sizeResults = main.FALSE
2808 main.log.error( "ONOS" + node + " expected a size of " +
2809 str( size ) + " for set " + main.onosSetName +
2810 " but got " + str( sizeResponses[ i ] ) )
2811 retainResults = retainResults and getResults and sizeResults
2812 utilities.assert_equals( expect=main.TRUE,
2813 actual=retainResults,
2814 onpass="Set retain correct",
2815 onfail="Set retain was incorrect" )
2816
2817 # Transactional maps
2818 main.step( "Partitioned Transactional maps put" )
2819 tMapValue = "Testing"
2820 numKeys = 100
2821 putResult = True
Jon Hallf37d44d2017-05-24 10:37:30 -07002822 node = main.activeNodes[ 0 ]
2823 putResponses = main.CLIs[ node ].transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002824 if putResponses and len( putResponses ) == 100:
2825 for i in putResponses:
2826 if putResponses[ i ][ 'value' ] != tMapValue:
2827 putResult = False
2828 else:
2829 putResult = False
2830 if not putResult:
2831 main.log.debug( "Put response values: " + str( putResponses ) )
2832 utilities.assert_equals( expect=True,
2833 actual=putResult,
2834 onpass="Partitioned Transactional Map put successful",
2835 onfail="Partitioned Transactional Map put values are incorrect" )
2836
2837 main.step( "Partitioned Transactional maps get" )
2838 # FIXME: is this sleep needed?
2839 time.sleep( 5 )
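            # NOTE: if the sleep is only waiting for the puts to replicate, a
            # bounded poll would be less fragile ( sketch; assumes
            # transactionalMapGet returns the stored string once the write is
            # visible on that node, as the loop below relies on ):
            #   node = main.activeNodes[ 0 ]
            #   for attempt in range( 10 ):
            #       if main.CLIs[ node ].transactionalMapGet( "Key1" ) == tMapValue:
            #           break
            #       time.sleep( 1 )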
2840
2841 getCheck = True
2842 for n in range( 1, numKeys + 1 ):
2843 getResponses = []
2844 threads = []
2845 valueCheck = True
2846 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002847 t = main.Thread( target=main.CLIs[ i ].transactionalMapGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002848 name="TMap-get-" + str( i ),
2849 args=[ "Key" + str( n ) ] )
2850 threads.append( t )
2851 t.start()
2852 for t in threads:
2853 t.join()
2854 getResponses.append( t.result )
2855 for node in getResponses:
2856 if node != tMapValue:
2857 valueCheck = False
2858 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002859 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002860 main.log.warn( getResponses )
2861 getCheck = getCheck and valueCheck
2862 utilities.assert_equals( expect=True,
2863 actual=getCheck,
2864 onpass="Partitioned Transactional Map get values were correct",
2865 onfail="Partitioned Transactional Map values incorrect" )
2866
2867 # DISTRIBUTED ATOMIC VALUE
2868            main.step( "Get the value of a new atomic Value" )
2869 threads = []
2870 getValues = []
2871 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002872 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002873 name="ValueGet-" + str( i ),
2874 args=[ valueName ] )
2875 threads.append( t )
2876 t.start()
2877
2878 for t in threads:
2879 t.join()
2880 getValues.append( t.result )
2881 main.log.debug( getValues )
2882 # Check the results
2883 atomicValueGetResult = True
2884 expected = valueValue if valueValue is not None else "null"
2885 main.log.debug( "Checking for value of " + expected )
2886 for i in getValues:
2887 if i != expected:
2888 atomicValueGetResult = False
2889 utilities.assert_equals( expect=True,
2890 actual=atomicValueGetResult,
2891 onpass="Atomic Value get successful",
2892 onfail="Error getting atomic Value " +
2893 str( valueValue ) + ", found: " +
2894 str( getValues ) )
2895
2896 main.step( "Atomic Value set()" )
2897 valueValue = "foo"
2898 threads = []
2899 setValues = []
2900 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002901 t = main.Thread( target=main.CLIs[ i ].valueTestSet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002902 name="ValueSet-" + str( i ),
2903 args=[ valueName, valueValue ] )
2904 threads.append( t )
2905 t.start()
2906
2907 for t in threads:
2908 t.join()
2909 setValues.append( t.result )
2910 main.log.debug( setValues )
2911 # Check the results
2912 atomicValueSetResults = True
2913 for i in setValues:
2914 if i != main.TRUE:
2915 atomicValueSetResults = False
2916 utilities.assert_equals( expect=True,
2917 actual=atomicValueSetResults,
2918 onpass="Atomic Value set successful",
2919                                     onfail="Error setting atomic Value: " +
2920 str( setValues ) )
2921
2922 main.step( "Get the value after set()" )
2923 threads = []
2924 getValues = []
2925 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002926 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002927 name="ValueGet-" + str( i ),
2928 args=[ valueName ] )
2929 threads.append( t )
2930 t.start()
2931
2932 for t in threads:
2933 t.join()
2934 getValues.append( t.result )
2935 main.log.debug( getValues )
2936 # Check the results
2937 atomicValueGetResult = True
2938 expected = valueValue if valueValue is not None else "null"
2939 main.log.debug( "Checking for value of " + expected )
2940 for i in getValues:
2941 if i != expected:
2942 atomicValueGetResult = False
2943 utilities.assert_equals( expect=True,
2944 actual=atomicValueGetResult,
2945 onpass="Atomic Value get successful",
2946 onfail="Error getting atomic Value " +
2947 str( valueValue ) + ", found: " +
2948 str( getValues ) )
2949
2950 main.step( "Atomic Value compareAndSet()" )
2951 oldValue = valueValue
2952 valueValue = "bar"
Jon Hallf37d44d2017-05-24 10:37:30 -07002953 i = main.activeNodes[ 0 ]
2954 CASValue = main.CLIs[ i ].valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002955 main.log.debug( CASValue )
2956 utilities.assert_equals( expect=main.TRUE,
2957 actual=CASValue,
2958                                     onpass="Atomic Value compareAndSet successful",
2959                                     onfail="Error with compareAndSet atomic Value: " +
2960 str( CASValue ) )
2961
2962 main.step( "Get the value after compareAndSet()" )
2963 threads = []
2964 getValues = []
2965 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002966 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002967 name="ValueGet-" + str( i ),
2968 args=[ valueName ] )
2969 threads.append( t )
2970 t.start()
2971
2972 for t in threads:
2973 t.join()
2974 getValues.append( t.result )
2975 main.log.debug( getValues )
2976 # Check the results
2977 atomicValueGetResult = True
2978 expected = valueValue if valueValue is not None else "null"
2979 main.log.debug( "Checking for value of " + expected )
2980 for i in getValues:
2981 if i != expected:
2982 atomicValueGetResult = False
2983 utilities.assert_equals( expect=True,
2984 actual=atomicValueGetResult,
2985 onpass="Atomic Value get successful",
2986 onfail="Error getting atomic Value " +
2987 str( valueValue ) + ", found: " +
2988 str( getValues ) )
2989
2990 main.step( "Atomic Value getAndSet()" )
2991 oldValue = valueValue
2992 valueValue = "baz"
Jon Hallf37d44d2017-05-24 10:37:30 -07002993 i = main.activeNodes[ 0 ]
2994 GASValue = main.CLIs[ i ].valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002995 main.log.debug( GASValue )
2996 expected = oldValue if oldValue is not None else "null"
2997 utilities.assert_equals( expect=expected,
2998 actual=GASValue,
2999 onpass="Atomic Value GAS successful",
3000 onfail="Error with GetAndSet atomic Value: expected " +
3001 str( expected ) + ", found: " +
3002 str( GASValue ) )
3003
3004 main.step( "Get the value after getAndSet()" )
3005 threads = []
3006 getValues = []
3007 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07003008 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003009 name="ValueGet-" + str( i ),
3010 args=[ valueName ] )
3011 threads.append( t )
3012 t.start()
3013
3014 for t in threads:
3015 t.join()
3016 getValues.append( t.result )
3017 main.log.debug( getValues )
3018 # Check the results
3019 atomicValueGetResult = True
3020 expected = valueValue if valueValue is not None else "null"
3021 main.log.debug( "Checking for value of " + expected )
3022 for i in getValues:
3023 if i != expected:
3024 atomicValueGetResult = False
3025 utilities.assert_equals( expect=True,
3026 actual=atomicValueGetResult,
3027 onpass="Atomic Value get successful",
3028 onfail="Error getting atomic Value: expected " +
3029 str( valueValue ) + ", found: " +
3030 str( getValues ) )
3031
3032            main.step( "Atomic Value destroy()" )
3033 valueValue = None
3034 threads = []
Jon Hallf37d44d2017-05-24 10:37:30 -07003035 i = main.activeNodes[ 0 ]
3036 destroyResult = main.CLIs[ i ].valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003037 main.log.debug( destroyResult )
3038 # Check the results
3039 utilities.assert_equals( expect=main.TRUE,
3040 actual=destroyResult,
3041 onpass="Atomic Value destroy successful",
3042 onfail="Error destroying atomic Value" )
3043
3044 main.step( "Get the value after destroy()" )
3045 threads = []
3046 getValues = []
3047 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07003048 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003049 name="ValueGet-" + str( i ),
3050 args=[ valueName ] )
3051 threads.append( t )
3052 t.start()
3053
3054 for t in threads:
3055 t.join()
3056 getValues.append( t.result )
3057 main.log.debug( getValues )
3058 # Check the results
3059 atomicValueGetResult = True
3060 expected = valueValue if valueValue is not None else "null"
3061 main.log.debug( "Checking for value of " + expected )
3062 for i in getValues:
3063 if i != expected:
3064 atomicValueGetResult = False
3065 utilities.assert_equals( expect=True,
3066 actual=atomicValueGetResult,
3067 onpass="Atomic Value get successful",
3068 onfail="Error getting atomic Value " +
3069 str( valueValue ) + ", found: " +
3070 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07003071
            # WORK QUEUES
            main.step( "Work Queue add()" )
            threads = []
            i = main.activeNodes[ 0 ]
            addResult = main.CLIs[ i ].workQueueAdd( workQueueName, 'foo' )
            workQueuePending += 1
            main.log.debug( addResult )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=addResult,
                                     onpass="Work Queue add successful",
                                     onfail="Error adding to Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )

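            # NOTE: workQueueCompleted / workQueueInProgress / workQueuePending
            # mirror the queue's expected state on the test side; every add or
            # take below adjusts them, and workQueueStatsCheck() ( defined
            # elsewhere in this class ) compares this bookkeeping against the
            # stats that ONOS reports for the queue.
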
            main.step( "Work Queue addMultiple()" )
            threads = []
            i = main.activeNodes[ 0 ]
            addMultipleResult = main.CLIs[ i ].workQueueAddMultiple( workQueueName, 'bar', 'baz' )
            workQueuePending += 2
            main.log.debug( addMultipleResult )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=addMultipleResult,
                                     onpass="Work Queue add multiple successful",
                                     onfail="Error adding multiple items to Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )

            main.step( "Work Queue takeAndComplete() 1" )
            threads = []
            i = main.activeNodes[ 0 ]
            number = 1
            take1Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
            workQueuePending -= number
            workQueueCompleted += number
            main.log.debug( take1Result )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=take1Result,
                                     onpass="Work Queue takeAndComplete 1 successful",
                                     onfail="Error taking 1 from Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )

            main.step( "Work Queue takeAndComplete() 2" )
            threads = []
            i = main.activeNodes[ 0 ]
            number = 2
            take2Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
            workQueuePending -= number
            workQueueCompleted += number
            main.log.debug( take2Result )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=take2Result,
                                     onpass="Work Queue takeAndComplete 2 successful",
                                     onfail="Error taking 2 from Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )

            main.step( "Work Queue destroy()" )
            valueValue = None
            threads = []
            i = main.activeNodes[ 0 ]
            destroyResult = main.CLIs[ i ].workQueueDestroy( workQueueName )
            workQueueCompleted = 0
            workQueueInProgress = 0
            workQueuePending = 0
            main.log.debug( destroyResult )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=destroyResult,
                                     onpass="Work Queue destroy successful",
                                     onfail="Error destroying Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )
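
            # NOTE: walking through the bookkeeping above: add() leaves
            # pending == 1, addMultiple() brings it to 3, takeAndComplete( 1 )
            # moves one item to completed ( pending 2, completed 1 ),
            # takeAndComplete( 2 ) drains the rest ( pending 0, completed 3 ),
            # and destroy() resets every counter to 0. Each stats check
            # verifies ONOS agrees at that point in the lifecycle.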
        except Exception as e:
            main.log.error( "Exception: " + str( e ) )

    def cleanUp( self, main ):
        """
        Clean up
        """
        import os
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        # printing colors to terminal
        colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
                   'blue': '\033[94m', 'green': '\033[92m',
                   'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
        main.case( "Test Cleanup" )
        main.step( "Killing tcpdumps" )
        main.Mininet2.stopTcpdump()

        testname = main.TEST
        if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
            main.step( "Copying MN pcap and ONOS log files to test station" )
            teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
            teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
            # NOTE: MN Pcap file is being saved to logdir.
            #       We scp this file as MN and TestON aren't necessarily the same vm

            # FIXME: To be replaced with a Jenkins post script
            # TODO: Load these from params
            # NOTE: must end in /
            logFolder = "/opt/onos/log/"
            logFiles = [ "karaf.log", "karaf.log.1" ]
            # NOTE: must end in /
            for f in logFiles:
                for node in main.nodes:
                    dstName = main.logdir + "/" + node.name + "-" + f
                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                               logFolder + f, dstName )
            # std*.log's
            # NOTE: must end in /
            logFolder = "/opt/onos/var/"
            logFiles = [ "stderr.log", "stdout.log" ]
            # NOTE: must end in /
            for f in logFiles:
                for node in main.nodes:
                    dstName = main.logdir + "/" + node.name + "-" + f
                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                               logFolder + f, dstName )
        else:
            main.log.debug( "skipping saving log files" )

        main.step( "Stopping Mininet" )
        mnResult = main.Mininet1.stopNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet stopped",
                                 onfail="MN cleanup NOT successful" )

        main.step( "Checking ONOS Logs for errors" )
        for node in main.nodes:
            main.log.debug( "Checking logs for errors on " + node.name + ":" )
            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )

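        # Dump the HA timing measurements as a two-line CSV: one row of labels
        # ( main.HAlabels ) followed by one row of the corresponding data
        # ( main.HAdata ). The NameError guard below covers runs where these
        # were never populated.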
        try:
            timerLog = open( main.logdir + "/Timers.csv", 'w' )
            main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
            timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
            timerLog.close()
        except NameError as e:
            main.log.exception( e )

    def assignMastership( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )
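
        # NOTE: the "c = k % main.numCtrls" pattern above is what lets this
        # static 7-node assignment degrade gracefully on smaller clusters:
        # e.g. with 3 controllers, 4 % 3 wraps around to index 1 ( ONOS2 )
        # instead of indexing past the end of main.nodes.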

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def bringUpStoppedNode( self, main ):
        """
        Bring up the stopped ONOS node(s)
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert main.kill, "main.kill not defined"
        main.case( "Restart minority of ONOS nodes" )

        main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
        startResults = main.TRUE
        restartTime = time.time()
        for i in main.kill:
            startResults = startResults and\
                           main.ONOSbench.onosStart( main.nodes[ i ].ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                 onpass="ONOS nodes started successfully",
                                 onfail="ONOS nodes NOT successfully started" )

        main.step( "Checking if ONOS is up yet" )
        count = 0
        onosIsupResult = main.FALSE
        while onosIsupResult == main.FALSE and count < 10:
            onosIsupResult = main.TRUE
            for i in main.kill:
                onosIsupResult = onosIsupResult and\
                                 main.ONOSbench.isup( main.nodes[ i ].ip_address )
            count = count + 1
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS restarted successfully",
                                 onfail="ONOS restart NOT successful" )
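
        # NOTE: the while loop above is a simple bounded poll ( up to 10
        # passes over every restarted node ); the same effect could be had
        # with utilities.retry(), as used for the node checks further down.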

        main.step( "Restarting ONOS main.CLIs" )
        cliResults = main.TRUE
        for i in main.kill:
            cliResults = cliResults and\
                         main.CLIs[ i ].startOnosCli( main.nodes[ i ].ip_address )
            main.activeNodes.append( i )
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli restarted",
                                 onfail="ONOS cli did not restart" )
        main.activeNodes.sort()
        try:
            assert list( set( main.activeNodes ) ) == main.activeNodes,\
                "List of active nodes has duplicates, this likely indicates something was run out of order"
        except AssertionError:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

        # Grab the time of restart so we can check how long the gossip
        # protocol has had time to work
        main.restartTime = time.time() - restartTime
        main.log.debug( "Restart time: " + str( main.restartTime ) )
        # TODO: Make this configurable. Also, we are breaking the above timer
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       sleep=15,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        node = main.activeNodes[ 0 ]
        main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
        main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
        main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
        main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )

        main.step( "Rerun for election on the node(s) that were killed" )
        runResults = main.TRUE
        for i in main.kill:
            runResults = runResults and\
                         main.CLIs[ i ].electionTestRun()
        utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                 onpass="ONOS nodes reran for election topic",
                                 onfail="Error rerunning for election" )

    def checkStateAfterONOS( self, main, afterWhich, compareSwitch=False, isRestart=False ):
        """
        Check state after ONOS failure/scaling

        afterWhich :
            0: failure
            1: scaling
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        OnosAfterWhich = [ "failure", "scaling" ]

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        if compareSwitch:
            description2 = "Compare switch roles from before failure"
            main.step( description2 )
            try:
                currentJson = json.loads( ONOSMastership[ 0 ] )
                oldJson = json.loads( mastershipState )
            except ( ValueError, TypeError ):
                main.log.exception( "Something is wrong with parsing " +
                                    "ONOSMastership[0] or mastershipState" )
                main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
                main.log.error( "mastershipState" + repr( mastershipState ) )
                main.cleanup()
                main.exit()
            mastershipCheck = main.TRUE
            for i in range( 1, 29 ):
                switchDPID = str(
                    main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
                current = [ switch[ 'master' ] for switch in currentJson
                            if switchDPID in switch[ 'id' ] ]
                old = [ switch[ 'master' ] for switch in oldJson
                        if switchDPID in switch[ 'id' ] ]
                if current == old:
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.warn( "Mastership of switch %s changed" % switchDPID )
                    mastershipCheck = main.FALSE
            utilities.assert_equals(
                expect=main.TRUE,
                actual=mastershipCheck,
                onpass="Mastership of Switches was not changed",
                onfail="Mastership of some switches changed" )

        # NOTE: we expect mastership to change on controller failure/scaling down
        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )
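
        # e.g. a healthy node's tally above might log {'INSTALLED': 25},
        # while a node still converging could show {'INSTALLED': 20,
        # 'INSTALLING': 5} ( hypothetical counts; this is a per-node
        # histogram of intent states ).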

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        if not isRestart:
            main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
            # NOTE: this requires case 5 to pass for intentState to be set.
            #       maybe we should stop the test if that fails?
            sameIntents = main.FALSE
            try:
                intentState
            except NameError:
                main.log.warn( "No previous intent state was saved" )
            else:
                if intentState and intentState == ONOSIntents[ 0 ]:
                    sameIntents = main.TRUE
                    main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
                # TODO: possibly the states have changed? we may need to figure out
                #       what the acceptable states are
                elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                    sameIntents = main.TRUE
                    try:
                        before = json.loads( intentState )
                        after = json.loads( ONOSIntents[ 0 ] )
                        for intent in before:
                            if intent not in after:
                                sameIntents = main.FALSE
                                main.log.debug( "Intent is not currently in ONOS " +
                                                "(at least in the same form):" )
                                main.log.debug( json.dumps( intent ) )
                    except ( ValueError, TypeError ):
                        main.log.exception( "Exception printing intents" )
                        main.log.debug( repr( ONOSIntents[ 0 ] ) )
                        main.log.debug( repr( intentState ) )
                if sameIntents == main.FALSE:
                    try:
                        main.log.debug( "ONOS intents before: " )
                        main.log.debug( json.dumps( json.loads( intentState ),
                                                    sort_keys=True, indent=4,
                                                    separators=( ',', ': ' ) ) )
                        main.log.debug( "Current ONOS intents: " )
                        main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                    sort_keys=True, indent=4,
                                                    separators=( ',', ': ' ) ) )
                    except ( ValueError, TypeError ):
                        main.log.exception( "Exception printing intents" )
                        main.log.debug( repr( ONOSIntents[ 0 ] ) )
                        main.log.debug( repr( intentState ) )
                utilities.assert_equals(
                    expect=main.TRUE,
                    actual=sameIntents,
                    onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
                    onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
                intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component " + OnosAfterWhich[ afterWhich ] )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )
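
        # NOTE: getFlowTable() above dumps each switch's OpenFlow 1.3 table
        # from the Mininet side, and flowTableComp() diffs it against the
        # "flows" snapshot captured before the event, so this step catches
        # dataplane churn even when the intents themselves still look intact.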

        main.Mininet2.pingLongKill()

        """
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        # NOTE: Since intents are not persisted with IntentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        """

    def compareTopo( self, main ):
        """
        Compare topo
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            from tests.dependencies.topology import Topology
        except ImportError:
            main.log.error( "Topology not found, exiting the test" )
            main.exit()
        try:
            main.topoRelated
        except ( NameError, AttributeError ):
            main.topoRelated = Topology()
        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology doesn't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            devices = main.topoRelated.getAllDevices( main.activeNodes, True,
                                                      kwargs={ 'sleep': 5, 'attempts': 5,
                                                               'randomTime': True } )
            ipResult = main.TRUE

            hosts = main.topoRelated.getAllHosts( main.activeNodes, True,
                                                  kwargs={ 'sleep': 5, 'attempts': 5,
                                                           'randomTime': True },
                                                  inJson=True )

            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[ controller ] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = main.topoRelated.getAllPorts( main.activeNodes, True,
                                                  kwargs={ 'sleep': 5, 'attempts': 5,
                                                           'randomTime': True } )
            links = main.topoRelated.getAllLinks( main.activeNodes, True,
                                                  kwargs={ 'sleep': 5, 'attempts': 5,
                                                           'randomTime': True } )
            clusters = main.topoRelated.getAllClusters( main.activeNodes, True,
                                                        kwargs={ 'sleep': 5, 'attempts': 5,
                                                                 'randomTime': True } )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, no use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[ controller ] + 1 )
                currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
                                                                           mnSwitches,
                                                                           devices, ports )
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                                " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                                " Switches view is incorrect" )

                currentLinksResult = main.topoRelated.compareBase( links, controller,
                                                                   main.Mininet1.compareLinks,
                                                                   [ mnSwitches, mnLinks ] )
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                                " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                                " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                                " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                                " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                    if i == 1:
                        deviceId = "1000".zfill( 16 )
                    elif i == 2:
                        deviceId = "2000".zfill( 16 )
                    elif i == 3:
                        deviceId = "3000".zfill( 16 )
                    elif i == 4:
                        deviceId = "3004".zfill( 16 )
                    elif i == 5:
                        deviceId = "5000".zfill( 16 )
                    elif i == 6:
                        deviceId = "6000".zfill( 16 )
                    elif i == 7:
                        deviceId = "6007".zfill( 16 )
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill( 16 )
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill( 16 )
                    elif i == 28:
                        deviceId = "2800".zfill( 16 )
                    mappings[ macId ] = deviceId
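                # e.g. for i == 3 the pair computed above is
                # mac 00:00:00:00:00:03 -> device 0000000000003000, i.e. host
                # h3 is expected to attach to switch s3 in the obelisk topo.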
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'locations' )[ 0 ]
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port ) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
            # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )
            for x in links:
                main.log.warn( "{}: {}".format( len( x ), x ) )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        topoResult = topoResult and int( count <= 2 )
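        # NOTE: main.TRUE / main.FALSE are integer truth codes in TestON, so
        # the int() cast keeps topoResult numeric while also requiring that
        # the topology converged within two passes of the polling loop above.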
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Links are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[ i ].name,
                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            main.cleanup()
            main.exit()

    def linkDown( self, main, fromS="s3", toS="s28" ):
        """
        Link fromS-toS down
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # NOTE: You should probably run a topology check after this

        linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )

        description = "Turn off a link to ensure that Link Discovery " +\
                      "is working properly"
        main.case( description )

        main.step( "Kill Link between " + fromS + " and " + toS )
        LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
        main.log.info( "Waiting " + str( linkSleep ) +
                       " seconds for link down to be discovered" )
        time.sleep( linkSleep )
        utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
                                 onpass="Link down successful",
                                 onfail="Failed to bring link down" )
        # TODO do some sort of check here
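        # A minimal sketch of such a check ( hypothetical, left as a comment ):
        # compare the link count ONOS reports after the event, e.g.:
        #
        #     import json
        #     onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        #     onosLinks = json.loads( onosCli.links() )
        #     main.log.debug( "ONOS now sees " + str( len( onosLinks ) ) + " links" )
        #
        # This assumes the CLI driver exposes a JSON links() query, matching
        # the devices()/intents() queries used elsewhere in this class.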

    def linkUp( self, main, fromS="s3", toS="s28" ):
        """
        Link fromS-toS up
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # NOTE: You should probably run a topology check after this

        linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )

        description = "Restore a link to ensure that Link Discovery is " + \
                      "working properly"
        main.case( description )

        main.step( "Bring link between " + fromS + " and " + toS + " back up" )
        LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
        main.log.info( "Waiting " + str( linkSleep ) +
                       " seconds for link up to be discovered" )
        time.sleep( linkSleep )
        utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
                                 onpass="Link up successful",
                                 onfail="Failed to bring link up" )

    def switchDown( self, main ):
        """
        Switch Down
        """
        # NOTE: You should probably run a topology check after this
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )

        description = "Killing a switch to ensure it is discovered correctly"
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        main.case( description )
        switch = main.params[ 'kill' ][ 'switch' ]
        switchDPID = main.params[ 'kill' ][ 'dpid' ]

        # TODO: Make this switch parameterizable
        main.step( "Kill " + switch )
        main.log.info( "Deleting " + switch )
        main.Mininet1.delSwitch( switch )
        main.log.info( "Waiting " + str( switchSleep ) +
                       " seconds for switch down to be discovered" )
        time.sleep( switchSleep )
        device = onosCli.getDevice( dpid=switchDPID )
        # Peek at the deleted switch
        main.log.warn( str( device ) )
        result = main.FALSE
        if device and device[ 'available' ] is False:
            result = main.TRUE
        utilities.assert_equals( expect=main.TRUE, actual=result,
                                 onpass="Kill switch successful",
                                 onfail="Failed to kill switch?" )

    def switchUp( self, main ):
        """
        Switch Up
        """
        # NOTE: You should probably run a topology check after this
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
        switch = main.params[ 'kill' ][ 'switch' ]
        switchDPID = main.params[ 'kill' ][ 'dpid' ]
        links = main.params[ 'kill' ][ 'links' ].split()
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        description = "Adding a switch to ensure it is discovered correctly"
        main.case( description )

        main.step( "Add back " + switch )
        main.Mininet1.addSwitch( switch, dpid=switchDPID )
        for peer in links:
            main.Mininet1.addLink( switch, peer )
        ipList = [ node.ip_address for node in main.nodes ]
        main.Mininet1.assignSwController( sw=switch, ip=ipList )
        main.log.info( "Waiting " + str( switchSleep ) +
                       " seconds for switch up to be discovered" )
        time.sleep( switchSleep )
        device = onosCli.getDevice( dpid=switchDPID )
        # Peek at the added switch
        main.log.warn( str( device ) )
        result = main.FALSE
        if device and device[ 'available' ]:
            result = main.TRUE
        utilities.assert_equals( expect=main.TRUE, actual=result,
                                 onpass="add switch successful",
                                 onfail="Failed to add switch?" )

    def startElectionApp( self, main ):
        """
        start election app on all onos nodes
        """
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Start Leadership Election app" )
        main.step( "Install leadership election app" )
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        appResult = onosCli.activateApp( "org.onosproject.election" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=appResult,
            onpass="Election app installed",
            onfail="Something went wrong with installing Leadership election" )

        main.step( "Run for election on each node" )
        for i in main.activeNodes:
            main.CLIs[ i ].electionTestRun()
        time.sleep( 5 )
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, leaders = self.consistentLeaderboards( activeCLIs )
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="All nodes see the same leaderboards",
            onfail="Inconsistent leaderboards" )
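
        # NOTE: consistentLeaderboards() ( defined elsewhere in this class )
        # returns a ( matched, leaderboards ) pair: "matched" is True when
        # every node reports the same board, in which case leaders[ 0 ][ 0 ]
        # is the cluster-wide leader for the election topic.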

        if sameResult:
            leader = leaders[ 0 ][ 0 ]
            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                correctLeader = True
            else:
                correctLeader = False
            main.step( "First node was elected leader" )
            utilities.assert_equals(
                expect=True,
                actual=correctLeader,
                onpass="Correct leader was elected",
                onfail="Incorrect leader" )

    def isElectionFunctional( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that the new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, oldLeaders = self.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that the new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader ( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate list
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )

    def installDistributedPrimitiveApp( self, main ):
        """
        Install Distributed Primitives app
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        # Variables for the distributed primitives tests
        main.pCounterName = "TestON-Partitions"
        main.pCounterValue = 0
        main.onosSet = set( [] )
        main.onosSetName = "TestON-set"

        description = "Install Primitives app"
        main.case( description )
        main.step( "Install Primitives app" )
        appName = "org.onosproject.distributedprimitives"
        node = main.activeNodes[ 0 ]
        appResults = main.CLIs[ node ].activateApp( appName )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=appResults,
                                 onpass="Primitives app activated",
                                 onfail="Primitives app not activated" )
        # TODO check on all nodes instead of sleeping
        time.sleep( 5 )  # To allow all nodes to activate
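        # A sketch of the TODO above ( hypothetical, left as a comment ):
        # poll each active node until the app is listed, instead of a fixed
        # sleep, e.g.:
        #
        #     for i in main.activeNodes:
        #         utilities.retry( lambda: appName in main.CLIs[ i ].apps( jsonFormat=False ),
        #                          False, attempts=5, sleep=1 )
        #
        # This assumes apps() returns a printable app listing, as used for
        # debugging elsewhere in this file, and follows the utilities.retry()
        # call shape used for nodesCheck above.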