blob: 30efbbdebbe00ee1ea133c9f03f30444e9bca930 [file] [log] [blame]
Jon Hall69b2b982016-05-11 12:04:59 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic swapping of cluster nodes.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: Swap nodes
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAswapNodes:
28
    def __init__( self ):
        """Initialize the test class; TestON creates one instance per run."""
        # Placeholder attribute; TestON test classes conventionally define
        # this default. No other state is kept on the instance — all shared
        # state lives on the `main` object passed to each CASE method.
        self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAswapNodes.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
133 port = main.params['serverPort']
134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 if main.numCtrls >= 5:
146 main.numCtrls -= 2
147 else:
148 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
149 genResult = main.Server.generateFile( main.numCtrls )
150 utilities.assert_equals( expect=main.TRUE, actual=genResult,
151 onpass="New cluster metadata file generated",
152 onfail="Failled to generate new metadata file" )
153
154 cleanInstallResult = main.TRUE
155 gitPullResult = main.TRUE
156
157 main.step( "Starting Mininet" )
158 # scp topo file to mininet
159 # TODO: move to params?
160 topoName = "obelisk.py"
161 filePath = main.ONOSbench.home + "/tools/test/topos/"
162 main.ONOSbench.scp( main.Mininet1,
163 filePath + topoName,
164 main.Mininet1.home,
165 direction="to" )
166 mnResult = main.Mininet1.startNet( )
167 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
168 onpass="Mininet Started",
169 onfail="Error starting Mininet" )
170
171 main.step( "Git checkout and pull " + gitBranch )
172 if PULLCODE:
173 main.ONOSbench.gitCheckout( gitBranch )
174 gitPullResult = main.ONOSbench.gitPull()
175 # values of 1 or 3 are good
176 utilities.assert_lesser( expect=0, actual=gitPullResult,
177 onpass="Git pull successful",
178 onfail="Git pull failed" )
179 main.ONOSbench.getVersion( report=True )
180
181 main.step( "Using mvn clean install" )
182 cleanInstallResult = main.TRUE
183 if PULLCODE and gitPullResult == main.TRUE:
184 cleanInstallResult = main.ONOSbench.cleanInstall()
185 else:
186 main.log.warn( "Did not pull new code so skipping mvn " +
187 "clean install" )
188 utilities.assert_equals( expect=main.TRUE,
189 actual=cleanInstallResult,
190 onpass="MCI successful",
191 onfail="MCI failed" )
192 # GRAPHS
193 # NOTE: important params here:
194 # job = name of Jenkins job
195 # Plot Name = Plot-HA, only can be used if multiple plots
196 # index = The number of the graph under plot name
197 job = "HAswapNodes"
198 plotName = "Plot-HA"
199 index = "0"
200 graphs = '<ac:structured-macro ac:name="html">\n'
201 graphs += '<ac:plain-text-body><![CDATA[\n'
202 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
203 '/plot/' + plotName + '/getPlot?index=' + index +\
204 '&width=500&height=300"' +\
205 'noborder="0" width="500" height="300" scrolling="yes" ' +\
206 'seamless="seamless"></iframe>\n'
207 graphs += ']]></ac:plain-text-body>\n'
208 graphs += '</ac:structured-macro>\n'
209 main.log.wiki(graphs)
210
211 main.step( "Copying backup config files" )
212 path = "~/onos/tools/package/bin/onos-service"
213 cp = main.ONOSbench.scp( main.ONOSbench,
214 path,
215 path + ".backup",
216 direction="to" )
217
218 utilities.assert_equals( expect=main.TRUE,
219 actual=cp,
220 onpass="Copy backup config file succeeded",
221 onfail="Copy backup config file failed" )
222 # we need to modify the onos-service file to use remote metadata file
223 # url for cluster metadata file
224 ip = main.ONOSbench.getIpAddr()
225 metaFile = "cluster.json"
226 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
227 main.log.warn( javaArgs )
228 main.log.warn( repr( javaArgs ) )
229 handle = main.ONOSbench.handle
230 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
231 main.log.warn( sed )
232 main.log.warn( repr( sed ) )
233 handle.sendline( sed )
234 handle.expect( "\$" )
235 main.log.debug( repr( handle.before ) )
236
237 main.step( "Creating ONOS package" )
238 packageResult = main.ONOSbench.onosPackage()
239 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
240 onpass="ONOS package successful",
241 onfail="ONOS package failed" )
242
243 main.step( "Installing ONOS package" )
244 onosInstallResult = main.TRUE
245 for i in range( main.ONOSbench.maxNodes ):
246 node = main.nodes[i]
247 options = "-f"
248 if i >= main.numCtrls:
249 options = "-nf" # Don't start more than the current scale
250 tmpResult = main.ONOSbench.onosInstall( options=options,
251 node=node.ip_address )
252 onosInstallResult = onosInstallResult and tmpResult
253 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
254 onpass="ONOS install successful",
255 onfail="ONOS install failed" )
256
257 # Cleanup custom onos-service file
258 main.ONOSbench.scp( main.ONOSbench,
259 path + ".backup",
260 path,
261 direction="to" )
262
263 main.step( "Checking if ONOS is up yet" )
264 for i in range( 2 ):
265 onosIsupResult = main.TRUE
266 for i in range( main.numCtrls ):
267 node = main.nodes[i]
268 started = main.ONOSbench.isup( node.ip_address )
269 if not started:
270 main.log.error( node.name + " hasn't started" )
271 onosIsupResult = onosIsupResult and started
272 if onosIsupResult == main.TRUE:
273 break
274 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
275 onpass="ONOS startup successful",
276 onfail="ONOS startup failed" )
277
278 main.log.step( "Starting ONOS CLI sessions" )
279 cliResults = main.TRUE
280 threads = []
281 for i in range( main.numCtrls ):
282 t = main.Thread( target=main.CLIs[i].startOnosCli,
283 name="startOnosCli-" + str( i ),
284 args=[main.nodes[i].ip_address] )
285 threads.append( t )
286 t.start()
287
288 for t in threads:
289 t.join()
290 cliResults = cliResults and t.result
291 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
292 onpass="ONOS cli startup successful",
293 onfail="ONOS cli startup failed" )
294
295 # Create a list of active nodes for use when some nodes are stopped
296 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
297
298 if main.params[ 'tcpdump' ].lower() == "true":
299 main.step( "Start Packet Capture MN" )
300 main.Mininet2.startTcpdump(
301 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
302 + "-MN.pcap",
303 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
304 port=main.params[ 'MNtcpdump' ][ 'port' ] )
305
306 main.step( "Checking ONOS nodes" )
307 nodeResults = utilities.retry( main.HA.nodesCheck,
308 False,
309 args=[main.activeNodes],
310 attempts=5 )
311 utilities.assert_equals( expect=True, actual=nodeResults,
312 onpass="Nodes check successful",
313 onfail="Nodes check NOT successful" )
314
315 if not nodeResults:
316 for i in main.activeNodes:
317 cli = main.CLIs[i]
318 main.log.debug( "{} components not ACTIVE: \n{}".format(
319 cli.name,
320 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
321 main.log.error( "Failed to start ONOS, stopping test" )
322 main.cleanup()
323 main.exit()
324
325 main.step( "Activate apps defined in the params file" )
326 # get data from the params
327 apps = main.params.get( 'apps' )
328 if apps:
329 apps = apps.split(',')
330 main.log.warn( apps )
331 activateResult = True
332 for app in apps:
333 main.CLIs[ 0 ].app( app, "Activate" )
334 # TODO: check this worked
335 time.sleep( 10 ) # wait for apps to activate
336 for app in apps:
337 state = main.CLIs[ 0 ].appStatus( app )
338 if state == "ACTIVE":
339 activateResult = activateResult and True
340 else:
341 main.log.error( "{} is in {} state".format( app, state ) )
342 activateResult = False
343 utilities.assert_equals( expect=True,
344 actual=activateResult,
345 onpass="Successfully activated apps",
346 onfail="Failed to activate apps" )
347 else:
348 main.log.warn( "No apps were specified to be loaded after startup" )
349
350 main.step( "Set ONOS configurations" )
351 config = main.params.get( 'ONOS_Configuration' )
352 if config:
353 main.log.debug( config )
354 checkResult = main.TRUE
355 for component in config:
356 for setting in config[component]:
357 value = config[component][setting]
358 check = main.CLIs[ 0 ].setCfg( component, setting, value )
359 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
360 checkResult = check and checkResult
361 utilities.assert_equals( expect=main.TRUE,
362 actual=checkResult,
363 onpass="Successfully set config",
364 onfail="Failed to set config" )
365 else:
366 main.log.warn( "No configurations were specified to be changed after startup" )
367
368 main.step( "App Ids check" )
369 appCheck = main.TRUE
370 threads = []
371 for i in main.activeNodes:
372 t = main.Thread( target=main.CLIs[i].appToIDCheck,
373 name="appToIDCheck-" + str( i ),
374 args=[] )
375 threads.append( t )
376 t.start()
377
378 for t in threads:
379 t.join()
380 appCheck = appCheck and t.result
381 if appCheck != main.TRUE:
382 node = main.activeNodes[0]
383 main.log.warn( main.CLIs[node].apps() )
384 main.log.warn( main.CLIs[node].appIDs() )
385 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
386 onpass="App Ids seem to be correct",
387 onfail="Something is wrong with app Ids" )
388
389 def CASE2( self, main ):
390 """
391 Assign devices to controllers
392 """
393 import re
394 assert main.numCtrls, "main.numCtrls not defined"
395 assert main, "main not defined"
396 assert utilities.assert_equals, "utilities.assert_equals not defined"
397 assert main.CLIs, "main.CLIs not defined"
398 assert main.nodes, "main.nodes not defined"
399
400 main.case( "Assigning devices to controllers" )
401 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
402 "and check that an ONOS node becomes the " +\
403 "master of the device."
404 main.step( "Assign switches to controllers" )
405
406 ipList = []
407 for i in range( main.ONOSbench.maxNodes ):
408 ipList.append( main.nodes[ i ].ip_address )
409 swList = []
410 for i in range( 1, 29 ):
411 swList.append( "s" + str( i ) )
412 main.Mininet1.assignSwController( sw=swList, ip=ipList )
413
414 mastershipCheck = main.TRUE
415 for i in range( 1, 29 ):
416 response = main.Mininet1.getSwController( "s" + str( i ) )
417 try:
418 main.log.info( str( response ) )
419 except Exception:
420 main.log.info( repr( response ) )
421 for node in main.nodes:
422 if re.search( "tcp:" + node.ip_address, response ):
423 mastershipCheck = mastershipCheck and main.TRUE
424 else:
425 main.log.error( "Error, node " + node.ip_address + " is " +
426 "not in the list of controllers s" +
427 str( i ) + " is connecting to." )
428 mastershipCheck = main.FALSE
429 utilities.assert_equals(
430 expect=main.TRUE,
431 actual=mastershipCheck,
432 onpass="Switch mastership assigned correctly",
433 onfail="Switches not assigned correctly to controllers" )
434
435 def CASE21( self, main ):
436 """
437 Assign mastership to controllers
438 """
439 import time
440 assert main.numCtrls, "main.numCtrls not defined"
441 assert main, "main not defined"
442 assert utilities.assert_equals, "utilities.assert_equals not defined"
443 assert main.CLIs, "main.CLIs not defined"
444 assert main.nodes, "main.nodes not defined"
445
446 main.case( "Assigning Controller roles for switches" )
447 main.caseExplanation = "Check that ONOS is connected to each " +\
448 "device. Then manually assign" +\
449 " mastership to specific ONOS nodes using" +\
450 " 'device-role'"
451 main.step( "Assign mastership of switches to specific controllers" )
452 # Manually assign mastership to the controller we want
453 roleCall = main.TRUE
454
455 ipList = [ ]
456 deviceList = []
457 onosCli = main.CLIs[ main.activeNodes[0] ]
458 try:
459 # Assign mastership to specific controllers. This assignment was
460 # determined for a 7 node cluser, but will work with any sized
461 # cluster
462 for i in range( 1, 29 ): # switches 1 through 28
463 # set up correct variables:
464 if i == 1:
465 c = 0
466 ip = main.nodes[ c ].ip_address # ONOS1
467 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
468 elif i == 2:
469 c = 1 % main.numCtrls
470 ip = main.nodes[ c ].ip_address # ONOS2
471 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
472 elif i == 3:
473 c = 1 % main.numCtrls
474 ip = main.nodes[ c ].ip_address # ONOS2
475 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
476 elif i == 4:
477 c = 3 % main.numCtrls
478 ip = main.nodes[ c ].ip_address # ONOS4
479 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
480 elif i == 5:
481 c = 2 % main.numCtrls
482 ip = main.nodes[ c ].ip_address # ONOS3
483 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
484 elif i == 6:
485 c = 2 % main.numCtrls
486 ip = main.nodes[ c ].ip_address # ONOS3
487 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
488 elif i == 7:
489 c = 5 % main.numCtrls
490 ip = main.nodes[ c ].ip_address # ONOS6
491 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
492 elif i >= 8 and i <= 17:
493 c = 4 % main.numCtrls
494 ip = main.nodes[ c ].ip_address # ONOS5
495 dpid = '3' + str( i ).zfill( 3 )
496 deviceId = onosCli.getDevice( dpid ).get( 'id' )
497 elif i >= 18 and i <= 27:
498 c = 6 % main.numCtrls
499 ip = main.nodes[ c ].ip_address # ONOS7
500 dpid = '6' + str( i ).zfill( 3 )
501 deviceId = onosCli.getDevice( dpid ).get( 'id' )
502 elif i == 28:
503 c = 0
504 ip = main.nodes[ c ].ip_address # ONOS1
505 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
506 else:
507 main.log.error( "You didn't write an else statement for " +
508 "switch s" + str( i ) )
509 roleCall = main.FALSE
510 # Assign switch
511 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
512 # TODO: make this controller dynamic
513 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
514 ipList.append( ip )
515 deviceList.append( deviceId )
516 except ( AttributeError, AssertionError ):
517 main.log.exception( "Something is wrong with ONOS device view" )
518 main.log.info( onosCli.devices() )
519 utilities.assert_equals(
520 expect=main.TRUE,
521 actual=roleCall,
522 onpass="Re-assigned switch mastership to designated controller",
523 onfail="Something wrong with deviceRole calls" )
524
525 main.step( "Check mastership was correctly assigned" )
526 roleCheck = main.TRUE
527 # NOTE: This is due to the fact that device mastership change is not
528 # atomic and is actually a multi step process
529 time.sleep( 5 )
530 for i in range( len( ipList ) ):
531 ip = ipList[i]
532 deviceId = deviceList[i]
533 # Check assignment
534 master = onosCli.getRole( deviceId ).get( 'master' )
535 if ip in master:
536 roleCheck = roleCheck and main.TRUE
537 else:
538 roleCheck = roleCheck and main.FALSE
539 main.log.error( "Error, controller " + ip + " is not" +
540 " master " + "of device " +
541 str( deviceId ) + ". Master is " +
542 repr( master ) + "." )
543 utilities.assert_equals(
544 expect=main.TRUE,
545 actual=roleCheck,
546 onpass="Switches were successfully reassigned to designated " +
547 "controller",
548 onfail="Switches were not successfully reassigned" )
549
    def CASE3( self, main ):
        """
        Assign intents

        Discover hosts via a reactive-forwarding pingall, install ten
        host-to-host intents ( h8<->h18 ... h17<->h27 ), then verify the
        intents disperse to every active node and reach INSTALLED state.
        Records the anti-entropy gossip time into the module-level
        labels/data lists for Jenkins plotting.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # Guard against CASE1 not having run: referencing the unbound names
        # raises a NameError subclass, and we fall back to empty lists.
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        # NOTE: we must reinstall intents until we have a persistant intent
        #       datastore!
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Run appToIDCheck on every active node in parallel
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            # Obelisk host MACs are 00:00:00:00:00:<host number in hex>
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Round-robin the intent submissions across active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        # Time how long it takes submitted intents to show up everywhere
        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll up to 100 times ( 1s apart ) until every active node reports
        # the full intent set, all in INSTALLED state
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # Export the measured dispersion time for later cases / plotting
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        # Find an unused "Gossip IntentsN" column label for the csv data
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in labels:
                labels.append( curTitle )
                data.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # If intents are still missing or pending, wait a minute and dump
        # the same diagnostic state again
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
967 main.log.error( repr( pendingMap ) )
968
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Polls ( up to 40 one-second retries ) until every intent reported by
        the first active node is INSTALLED, then pings each h<N> -> h<N+10>
        pair for N in 8..17.  Topic leadership, partitions and the pending
        map are dumped for debugging.  If the intents never all settled,
        waits 60 seconds, re-dumps the state and repeats the ping check once.
        """
        import json
        import time
        # Sanity-check the environment set up by earlier cases
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All state queries in this case go through one active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, or give up after 40 tries
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host intents connect h<N> to h<N+10>; ping each pair once
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            # NOTE(review): if intents() itself raised ValueError/TypeError,
            # tmpIntents would be unbound in the except handler below and
            # repr() would raise NameError - confirm this can't happen
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders output when topicCheck
        # PASSED; the equivalent dumps later in this case run on failure
        # ( "if missing:" ) - confirm the condition isn't inverted
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never all reached INSTALLED, wait 60s, re-dump all of
        # the debugging state and retry the ping check once more
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Dump per-node leaders output when a topic was missing
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1250
    def CASE5( self, main ):
        """
        Reading state of ONOS.

        Snapshots the cluster state from every active node - mastership,
        intents, flows, OF tables and topology - into the module globals
        mastershipState, intentState, flowState and flows ( consumed by
        later comparison cases ), checks that the per-node views are
        consistent with each other, starts long-running background pings
        via Mininet2, and compares the ONOS topology ( devices, links,
        hosts ) against Mininet.
        """
        import json
        import time
        # Sanity-check the environment set up by earlier cases
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        # Saved for later cases to compare against; '[]' until a consistent
        # snapshot is captured below
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query each active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        # Results are appended in the same order as main.activeNodes
        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    # NOTE(review): log.warn is given two positional args
                    # here ( comma, not '+' ) - confirm the driver accepts
                    # multiple args, else the dump is silently dropped
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Only keep the snapshot when all nodes agreed
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        # Saved for later cases to compare against
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent        ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Only keep the snapshot when all nodes agreed
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        # Saved for later cases to compare against
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Flows carry node-specific ids, so only the counts are compared
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        # Saved for later cases to compare against
        global flows
        flows = []
        # Topology has switches s1..s28
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Background pings between params-defined host pairs; later cases
        # use these to detect dataplane disruption
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Each of these lists is filled positionally, in main.activeNodes
        # order, by the thread joins below
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): this loop iterates node ids from main.activeNodes
        # but uses them as positional indices into devices/ports/links/hosts,
        # which were filled positionally ( 0..len-1 ); the two only agree
        # while activeNodes == range( len( activeNodes ) ) - confirm this
        # still holds after CASE6 swaps nodes
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1807
1808 def CASE6( self, main ):
1809 """
1810 The Scaling case.
1811 """
1812 import time
1813 import re
1814 assert main.numCtrls, "main.numCtrls not defined"
1815 assert main, "main not defined"
1816 assert utilities.assert_equals, "utilities.assert_equals not defined"
1817 assert main.CLIs, "main.CLIs not defined"
1818 assert main.nodes, "main.nodes not defined"
1819 try:
1820 labels
1821 except NameError:
1822 main.log.error( "labels not defined, setting to []" )
1823 global labels
1824 labels = []
1825 try:
1826 data
1827 except NameError:
1828 main.log.error( "data not defined, setting to []" )
1829 global data
1830 data = []
1831
1832 main.case( "Swap some of the ONOS nodes" )
1833
1834 main.step( "Checking ONOS Logs for errors" )
1835 for i in main.activeNodes:
1836 node = main.nodes[i]
1837 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1838 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1839
1840 main.step( "Generate new metadata file" )
1841 old = [ main.activeNodes[0], main.activeNodes[-1] ]
1842 new = range( main.ONOSbench.maxNodes )[-2:]
1843 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1844 handle = main.ONOSbench.handle
1845 for x, y in zip( old, new ):
1846 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1847 handle.expect( "\$" ) # from the variable
1848 ret = handle.before
1849 handle.expect( "\$" ) # From the prompt
1850 ret += handle.before
1851 main.log.debug( ret )
1852 main.activeNodes.remove( x )
1853 main.activeNodes.append( y )
1854
1855 genResult = main.Server.generateFile( main.numCtrls )
1856 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1857 onpass="New cluster metadata file generated",
1858 onfail="Failled to generate new metadata file" )
1859 time.sleep( 5 ) # Give time for nodes to read new file
1860
1861 main.step( "Start new nodes" ) # OR stop old nodes?
1862 started = main.TRUE
1863 for i in new:
1864 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1865 utilities.assert_equals( expect=main.TRUE, actual=started,
1866 onpass="ONOS started",
1867 onfail="ONOS start NOT successful" )
1868
1869 main.step( "Checking if ONOS is up yet" )
1870 for i in range( 2 ):
1871 onosIsupResult = main.TRUE
1872 for i in main.activeNodes:
1873 node = main.nodes[i]
1874 started = main.ONOSbench.isup( node.ip_address )
1875 if not started:
1876 main.log.error( node.name + " didn't start!" )
1877 onosIsupResult = onosIsupResult and started
1878 if onosIsupResult == main.TRUE:
1879 break
1880 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1881 onpass="ONOS started",
1882 onfail="ONOS start NOT successful" )
1883
1884 main.log.step( "Starting ONOS CLI sessions" )
1885 cliResults = main.TRUE
1886 threads = []
1887 for i in main.activeNodes:
1888 t = main.Thread( target=main.CLIs[i].startOnosCli,
1889 name="startOnosCli-" + str( i ),
1890 args=[main.nodes[i].ip_address] )
1891 threads.append( t )
1892 t.start()
1893
1894 for t in threads:
1895 t.join()
1896 cliResults = cliResults and t.result
1897 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1898 onpass="ONOS cli started",
1899 onfail="ONOS clis did not start" )
1900
1901 main.step( "Checking ONOS nodes" )
1902 nodeResults = utilities.retry( main.HA.nodesCheck,
1903 False,
1904 args=[main.activeNodes],
1905 attempts=5 )
1906 utilities.assert_equals( expect=True, actual=nodeResults,
1907 onpass="Nodes check successful",
1908 onfail="Nodes check NOT successful" )
1909
1910 for i in range( 10 ):
1911 ready = True
1912 for i in main.activeNodes:
1913 cli = main.CLIs[i]
1914 output = cli.summary()
1915 if not output:
1916 ready = False
1917 if ready:
1918 break
1919 time.sleep( 30 )
1920 utilities.assert_equals( expect=True, actual=ready,
1921 onpass="ONOS summary command succeded",
1922 onfail="ONOS summary command failed" )
1923 if not ready:
1924 main.cleanup()
1925 main.exit()
1926
1927 # Rerun for election on new nodes
1928 runResults = main.TRUE
1929 for i in main.activeNodes:
1930 cli = main.CLIs[i]
1931 run = cli.electionTestRun()
1932 if run != main.TRUE:
1933 main.log.error( "Error running for election on " + cli.name )
1934 runResults = runResults and run
1935 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1936 onpass="Reran for election",
1937 onfail="Failed to rerun for election" )
1938
1939 for node in main.activeNodes:
1940 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1941 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1942 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1943 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1944 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1945
1946 main.step( "Reapplying cell variable to environment" )
1947 cellName = main.params[ 'ENV' ][ 'cellName' ]
1948 cellResult = main.ONOSbench.setCell( cellName )
1949 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1950 onpass="Set cell successfull",
1951 onfail="Failled to set cell" )
1952
    def CASE7( self, main ):
        """
        Check state after ONOS scaling

        Verifies that the control plane survived the node swap:
        - every switch still has a master
        - mastership and intent views agree across all active nodes
        - intents and switch flow tables are unchanged from before scaling
        - leadership election still has a single, agreed-upon leader
        """
        import json
        # Fail fast if the shared test-framework state is missing
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query rolesNotNull on every active node in parallel
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the full roles output from every active node in parallel
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Any empty/error response from a node fails the step
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should report byte-identical roles output
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's (pretty-printed) view for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        # Pull intents (as JSON) from every active node in parallel
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted raw strings; ordering differences are tolerated
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one column per node's reported state
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (e.g. INSTALLED: 28)
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # NOTE(review): intentState is never assigned in this method; it is
        # presumably left in the shared TestON namespace by CASE5 — the
        # NameError handler below covers the case where it was never saved.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same byte length: fall back to element-wise JSON comparison
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Dump both snapshots to aid debugging the difference
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # NOTE(review): 'flows' is not defined in this method; it appears to
        # rely on a snapshot saved by an earlier case in the shared TestON
        # namespace (likely CASE5) — confirm that case ran before this one.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node must report the same, non-None leader
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2261
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices, hosts, ports, links and clusters from every
        active ONOS node (retrying for up to ~60s / 3 rounds) and compares
        each node's view with the live Mininet topology. Also verifies host
        attachment points against the hard-coded obelisk-topology mapping,
        cross-node consistency of hosts and clusters, and that the dataplane
        forms exactly one strongly-connected component.
        """
        import json
        import time
        # Fail fast if the shared test-framework state is missing
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every active node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Fetch hosts the same way; parse JSON immediately
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Fetch ports
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Fetch links
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Fetch SCC clusters
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to compare
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            # Ground truth from Mininet
            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    # NOTE(review): if compareSwitches() raises here,
                    # currentDevicesResult is left unassigned (first
                    # iteration) or stale (later iterations) — consider
                    # initializing it to main.FALSE before the try.
                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # No hosts discovered yet is treated as a pass here
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                # Build the expected host-MAC -> switch-dpid table for the
                # 28-host obelisk topology
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )
            for x in links:
                main.log.warn( "{}: {}".format( len( x ), x ) )


        main.step( "There is only one SCC" )
        # there should always only be one cluster
        # NOTE(review): clusterResults is only assigned in the except path or
        # when numClusters == 1; a successful parse yielding != 1 clusters
        # would leave it unbound for the topoResult expression below — TODO
        # confirm and initialize it to main.FALSE up front.
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Aggregate result over every individual check in this case
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump the non-ACTIVE karaf components of each node for debugging
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2683
2684 def CASE9( self, main ):
2685 """
2686 Link s3-s28 down
2687 """
2688 import time
2689 assert main.numCtrls, "main.numCtrls not defined"
2690 assert main, "main not defined"
2691 assert utilities.assert_equals, "utilities.assert_equals not defined"
2692 assert main.CLIs, "main.CLIs not defined"
2693 assert main.nodes, "main.nodes not defined"
2694 # NOTE: You should probably run a topology check after this
2695
2696 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2697
2698 description = "Turn off a link to ensure that Link Discovery " +\
2699 "is working properly"
2700 main.case( description )
2701
2702 main.step( "Kill Link between s3 and s28" )
2703 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2704 main.log.info( "Waiting " + str( linkSleep ) +
2705 " seconds for link down to be discovered" )
2706 time.sleep( linkSleep )
2707 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2708 onpass="Link down successful",
2709 onfail="Failed to bring link down" )
2710 # TODO do some sort of check here
2711
2712 def CASE10( self, main ):
2713 """
2714 Link s3-s28 up
2715 """
2716 import time
2717 assert main.numCtrls, "main.numCtrls not defined"
2718 assert main, "main not defined"
2719 assert utilities.assert_equals, "utilities.assert_equals not defined"
2720 assert main.CLIs, "main.CLIs not defined"
2721 assert main.nodes, "main.nodes not defined"
2722 # NOTE: You should probably run a topology check after this
2723
2724 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2725
2726 description = "Restore a link to ensure that Link Discovery is " + \
2727 "working properly"
2728 main.case( description )
2729
2730 main.step( "Bring link between s3 and s28 back up" )
2731 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2732 main.log.info( "Waiting " + str( linkSleep ) +
2733 " seconds for link up to be discovered" )
2734 time.sleep( linkSleep )
2735 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2736 onpass="Link up successful",
2737 onfail="Failed to bring link up" )
2738 # TODO do some sort of check here
2739
2740 def CASE11( self, main ):
2741 """
2742 Switch Down
2743 """
2744 # NOTE: You should probably run a topology check after this
2745 import time
2746 assert main.numCtrls, "main.numCtrls not defined"
2747 assert main, "main not defined"
2748 assert utilities.assert_equals, "utilities.assert_equals not defined"
2749 assert main.CLIs, "main.CLIs not defined"
2750 assert main.nodes, "main.nodes not defined"
2751
2752 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2753
2754 description = "Killing a switch to ensure it is discovered correctly"
2755 onosCli = main.CLIs[ main.activeNodes[0] ]
2756 main.case( description )
2757 switch = main.params[ 'kill' ][ 'switch' ]
2758 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2759
2760 # TODO: Make this switch parameterizable
2761 main.step( "Kill " + switch )
2762 main.log.info( "Deleting " + switch )
2763 main.Mininet1.delSwitch( switch )
2764 main.log.info( "Waiting " + str( switchSleep ) +
2765 " seconds for switch down to be discovered" )
2766 time.sleep( switchSleep )
2767 device = onosCli.getDevice( dpid=switchDPID )
2768 # Peek at the deleted switch
2769 main.log.warn( str( device ) )
2770 result = main.FALSE
2771 if device and device[ 'available' ] is False:
2772 result = main.TRUE
2773 utilities.assert_equals( expect=main.TRUE, actual=result,
2774 onpass="Kill switch successful",
2775 onfail="Failed to kill switch?" )
2776
2777 def CASE12( self, main ):
2778 """
2779 Switch Up
2780 """
2781 # NOTE: You should probably run a topology check after this
2782 import time
2783 assert main.numCtrls, "main.numCtrls not defined"
2784 assert main, "main not defined"
2785 assert utilities.assert_equals, "utilities.assert_equals not defined"
2786 assert main.CLIs, "main.CLIs not defined"
2787 assert main.nodes, "main.nodes not defined"
2788
2789 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2790 switch = main.params[ 'kill' ][ 'switch' ]
2791 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2792 links = main.params[ 'kill' ][ 'links' ].split()
2793 onosCli = main.CLIs[ main.activeNodes[0] ]
2794 description = "Adding a switch to ensure it is discovered correctly"
2795 main.case( description )
2796
2797 main.step( "Add back " + switch )
2798 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2799 for peer in links:
2800 main.Mininet1.addLink( switch, peer )
2801 ipList = [ node.ip_address for node in main.nodes ]
2802 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2803 main.log.info( "Waiting " + str( switchSleep ) +
2804 " seconds for switch up to be discovered" )
2805 time.sleep( switchSleep )
2806 device = onosCli.getDevice( dpid=switchDPID )
2807 # Peek at the deleted switch
2808 main.log.warn( str( device ) )
2809 result = main.FALSE
2810 if device and device[ 'available' ]:
2811 result = main.TRUE
2812 utilities.assert_equals( expect=main.TRUE, actual=result,
2813 onpass="add switch successful",
2814 onfail="Failed to add switch?" )
2815
2816 def CASE13( self, main ):
2817 """
2818 Clean up
2819 """
2820 assert main.numCtrls, "main.numCtrls not defined"
2821 assert main, "main not defined"
2822 assert utilities.assert_equals, "utilities.assert_equals not defined"
2823 assert main.CLIs, "main.CLIs not defined"
2824 assert main.nodes, "main.nodes not defined"
2825
2826 main.case( "Test Cleanup" )
2827 main.step( "Killing tcpdumps" )
2828 main.Mininet2.stopTcpdump()
2829
2830 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2831 main.step( "Copying MN pcap and ONOS log files to test station" )
2832 # NOTE: MN Pcap file is being saved to logdir.
2833 # We scp this file as MN and TestON aren't necessarily the same vm
2834
2835 # FIXME: To be replaced with a Jenkin's post script
2836 # TODO: Load these from params
2837 # NOTE: must end in /
2838 logFolder = "/opt/onos/log/"
2839 logFiles = [ "karaf.log", "karaf.log.1" ]
2840 # NOTE: must end in /
2841 for f in logFiles:
2842 for node in main.nodes:
2843 dstName = main.logdir + "/" + node.name + "-" + f
2844 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2845 logFolder + f, dstName )
2846 # std*.log's
2847 # NOTE: must end in /
2848 logFolder = "/opt/onos/var/"
2849 logFiles = [ "stderr.log", "stdout.log" ]
2850 # NOTE: must end in /
2851 for f in logFiles:
2852 for node in main.nodes:
2853 dstName = main.logdir + "/" + node.name + "-" + f
2854 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2855 logFolder + f, dstName )
2856 else:
2857 main.log.debug( "skipping saving log files" )
2858
2859 main.step( "Stopping Mininet" )
2860 mnResult = main.Mininet1.stopNet()
2861 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2862 onpass="Mininet stopped",
2863 onfail="MN cleanup NOT successful" )
2864
2865 main.step( "Checking ONOS Logs for errors" )
2866 for node in main.nodes:
2867 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2868 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2869
2870 try:
2871 timerLog = open( main.logdir + "/Timers.csv", 'w')
2872 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2873 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2874 timerLog.close()
2875 except NameError, e:
2876 main.log.exception(e)
2877
2878 main.step( "Stopping webserver" )
2879 status = main.Server.stop( )
2880 utilities.assert_equals( expect=main.TRUE, actual=status,
2881 onpass="Stop Server",
2882 onfail="Failled to stop SimpleHTTPServer" )
2883 del main.Server
2884
2885 def CASE14( self, main ):
2886 """
2887 start election app on all onos nodes
2888 """
2889 import time
2890 assert main.numCtrls, "main.numCtrls not defined"
2891 assert main, "main not defined"
2892 assert utilities.assert_equals, "utilities.assert_equals not defined"
2893 assert main.CLIs, "main.CLIs not defined"
2894 assert main.nodes, "main.nodes not defined"
2895
2896 main.case("Start Leadership Election app")
2897 main.step( "Install leadership election app" )
2898 onosCli = main.CLIs[ main.activeNodes[0] ]
2899 appResult = onosCli.activateApp( "org.onosproject.election" )
2900 utilities.assert_equals(
2901 expect=main.TRUE,
2902 actual=appResult,
2903 onpass="Election app installed",
2904 onfail="Something went wrong with installing Leadership election" )
2905
2906 main.step( "Run for election on each node" )
2907 for i in main.activeNodes:
2908 main.CLIs[i].electionTestRun()
2909 time.sleep(5)
2910 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2911 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2912 utilities.assert_equals(
2913 expect=True,
2914 actual=sameResult,
2915 onpass="All nodes see the same leaderboards",
2916 onfail="Inconsistent leaderboards" )
2917
2918 if sameResult:
2919 leader = leaders[ 0 ][ 0 ]
2920 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2921 correctLeader = True
2922 else:
2923 correctLeader = False
2924 main.step( "First node was elected leader" )
2925 utilities.assert_equals(
2926 expect=True,
2927 actual=correctLeader,
2928 onpass="Correct leader was elected",
2929 onfail="Incorrect leader" )
2930
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawl and later before withdrawl vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = [] # list of lists of each nodes' candidates before
        newLeaders = [] # list of lists of each nodes' candidates after
        oldLeader = '' # the old leader from oldLeaders, None if not same
        newLeader = '' # the new leader from newLeaders, None if not same
        oldLeaderCLI = None # the CLI of the old leader used for re-electing
        expectNoLeader = False # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes: # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election app the rest of this case is meaningless
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; the first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else: # FOR/ELSE statement: only runs when no break happened above
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means no node holds leadership; only OK with 1 controller
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawl
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 candidates: cannot predict the successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 ) # Parameterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        # NOTE(review): assumes consistentLeaderboards returns a non-empty
        # list of boards; reRunLeaders[0] would raise IndexError otherwise
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3104
3105 def CASE16( self, main ):
3106 """
3107 Install Distributed Primitives app
3108 """
3109 import time
3110 assert main.numCtrls, "main.numCtrls not defined"
3111 assert main, "main not defined"
3112 assert utilities.assert_equals, "utilities.assert_equals not defined"
3113 assert main.CLIs, "main.CLIs not defined"
3114 assert main.nodes, "main.nodes not defined"
3115
3116 # Variables for the distributed primitives tests
3117 global pCounterName
3118 global pCounterValue
3119 global onosSet
3120 global onosSetName
3121 pCounterName = "TestON-Partitions"
3122 pCounterValue = 0
3123 onosSet = set([])
3124 onosSetName = "TestON-set"
3125
3126 description = "Install Primitives app"
3127 main.case( description )
3128 main.step( "Install Primitives app" )
3129 appName = "org.onosproject.distributedprimitives"
3130 node = main.activeNodes[0]
3131 appResults = main.CLIs[node].activateApp( appName )
3132 utilities.assert_equals( expect=main.TRUE,
3133 actual=appResults,
3134 onpass="Primitives app activated",
3135 onfail="Primitives app not activated" )
3136 time.sleep( 5 ) # To allow all nodes to activate
3137
3138 def CASE17( self, main ):
3139 """
3140 Check for basic functionality with distributed primitives
3141 """
3142 # Make sure variables are defined/set
3143 assert main.numCtrls, "main.numCtrls not defined"
3144 assert main, "main not defined"
3145 assert utilities.assert_equals, "utilities.assert_equals not defined"
3146 assert main.CLIs, "main.CLIs not defined"
3147 assert main.nodes, "main.nodes not defined"
3148 assert pCounterName, "pCounterName not defined"
3149 assert onosSetName, "onosSetName not defined"
3150 # NOTE: assert fails if value is 0/None/Empty/False
3151 try:
3152 pCounterValue
3153 except NameError:
3154 main.log.error( "pCounterValue not defined, setting to 0" )
3155 pCounterValue = 0
3156 try:
3157 onosSet
3158 except NameError:
3159 main.log.error( "onosSet not defined, setting to empty Set" )
3160 onosSet = set([])
3161 # Variables for the distributed primitives tests. These are local only
3162 addValue = "a"
3163 addAllValue = "a b c d e f"
3164 retainValue = "c d e f"
3165
3166 description = "Check for basic functionality with distributed " +\
3167 "primitives"
3168 main.case( description )
3169 main.caseExplanation = "Test the methods of the distributed " +\
3170 "primitives (counters and sets) throught the cli"
3171 # DISTRIBUTED ATOMIC COUNTERS
3172 # Partitioned counters
3173 main.step( "Increment then get a default counter on each node" )
3174 pCounters = []
3175 threads = []
3176 addedPValues = []
3177 for i in main.activeNodes:
3178 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3179 name="counterAddAndGet-" + str( i ),
3180 args=[ pCounterName ] )
3181 pCounterValue += 1
3182 addedPValues.append( pCounterValue )
3183 threads.append( t )
3184 t.start()
3185
3186 for t in threads:
3187 t.join()
3188 pCounters.append( t.result )
3189 # Check that counter incremented numController times
3190 pCounterResults = True
3191 for i in addedPValues:
3192 tmpResult = i in pCounters
3193 pCounterResults = pCounterResults and tmpResult
3194 if not tmpResult:
3195 main.log.error( str( i ) + " is not in partitioned "
3196 "counter incremented results" )
3197 utilities.assert_equals( expect=True,
3198 actual=pCounterResults,
3199 onpass="Default counter incremented",
3200 onfail="Error incrementing default" +
3201 " counter" )
3202
3203 main.step( "Get then Increment a default counter on each node" )
3204 pCounters = []
3205 threads = []
3206 addedPValues = []
3207 for i in main.activeNodes:
3208 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3209 name="counterGetAndAdd-" + str( i ),
3210 args=[ pCounterName ] )
3211 addedPValues.append( pCounterValue )
3212 pCounterValue += 1
3213 threads.append( t )
3214 t.start()
3215
3216 for t in threads:
3217 t.join()
3218 pCounters.append( t.result )
3219 # Check that counter incremented numController times
3220 pCounterResults = True
3221 for i in addedPValues:
3222 tmpResult = i in pCounters
3223 pCounterResults = pCounterResults and tmpResult
3224 if not tmpResult:
3225 main.log.error( str( i ) + " is not in partitioned "
3226 "counter incremented results" )
3227 utilities.assert_equals( expect=True,
3228 actual=pCounterResults,
3229 onpass="Default counter incremented",
3230 onfail="Error incrementing default" +
3231 " counter" )
3232
3233 main.step( "Counters we added have the correct values" )
3234 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3235 utilities.assert_equals( expect=main.TRUE,
3236 actual=incrementCheck,
3237 onpass="Added counters are correct",
3238 onfail="Added counters are incorrect" )
3239
3240 main.step( "Add -8 to then get a default counter on each node" )
3241 pCounters = []
3242 threads = []
3243 addedPValues = []
3244 for i in main.activeNodes:
3245 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3246 name="counterIncrement-" + str( i ),
3247 args=[ pCounterName ],
3248 kwargs={ "delta": -8 } )
3249 pCounterValue += -8
3250 addedPValues.append( pCounterValue )
3251 threads.append( t )
3252 t.start()
3253
3254 for t in threads:
3255 t.join()
3256 pCounters.append( t.result )
3257 # Check that counter incremented numController times
3258 pCounterResults = True
3259 for i in addedPValues:
3260 tmpResult = i in pCounters
3261 pCounterResults = pCounterResults and tmpResult
3262 if not tmpResult:
3263 main.log.error( str( i ) + " is not in partitioned "
3264 "counter incremented results" )
3265 utilities.assert_equals( expect=True,
3266 actual=pCounterResults,
3267 onpass="Default counter incremented",
3268 onfail="Error incrementing default" +
3269 " counter" )
3270
3271 main.step( "Add 5 to then get a default counter on each node" )
3272 pCounters = []
3273 threads = []
3274 addedPValues = []
3275 for i in main.activeNodes:
3276 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3277 name="counterIncrement-" + str( i ),
3278 args=[ pCounterName ],
3279 kwargs={ "delta": 5 } )
3280 pCounterValue += 5
3281 addedPValues.append( pCounterValue )
3282 threads.append( t )
3283 t.start()
3284
3285 for t in threads:
3286 t.join()
3287 pCounters.append( t.result )
3288 # Check that counter incremented numController times
3289 pCounterResults = True
3290 for i in addedPValues:
3291 tmpResult = i in pCounters
3292 pCounterResults = pCounterResults and tmpResult
3293 if not tmpResult:
3294 main.log.error( str( i ) + " is not in partitioned "
3295 "counter incremented results" )
3296 utilities.assert_equals( expect=True,
3297 actual=pCounterResults,
3298 onpass="Default counter incremented",
3299 onfail="Error incrementing default" +
3300 " counter" )
3301
3302 main.step( "Get then add 5 to a default counter on each node" )
3303 pCounters = []
3304 threads = []
3305 addedPValues = []
3306 for i in main.activeNodes:
3307 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3308 name="counterIncrement-" + str( i ),
3309 args=[ pCounterName ],
3310 kwargs={ "delta": 5 } )
3311 addedPValues.append( pCounterValue )
3312 pCounterValue += 5
3313 threads.append( t )
3314 t.start()
3315
3316 for t in threads:
3317 t.join()
3318 pCounters.append( t.result )
3319 # Check that counter incremented numController times
3320 pCounterResults = True
3321 for i in addedPValues:
3322 tmpResult = i in pCounters
3323 pCounterResults = pCounterResults and tmpResult
3324 if not tmpResult:
3325 main.log.error( str( i ) + " is not in partitioned "
3326 "counter incremented results" )
3327 utilities.assert_equals( expect=True,
3328 actual=pCounterResults,
3329 onpass="Default counter incremented",
3330 onfail="Error incrementing default" +
3331 " counter" )
3332
3333 main.step( "Counters we added have the correct values" )
3334 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3335 utilities.assert_equals( expect=main.TRUE,
3336 actual=incrementCheck,
3337 onpass="Added counters are correct",
3338 onfail="Added counters are incorrect" )
3339
3340 # DISTRIBUTED SETS
3341 main.step( "Distributed Set get" )
3342 size = len( onosSet )
3343 getResponses = []
3344 threads = []
3345 for i in main.activeNodes:
3346 t = main.Thread( target=main.CLIs[i].setTestGet,
3347 name="setTestGet-" + str( i ),
3348 args=[ onosSetName ] )
3349 threads.append( t )
3350 t.start()
3351 for t in threads:
3352 t.join()
3353 getResponses.append( t.result )
3354
3355 getResults = main.TRUE
3356 for i in range( len( main.activeNodes ) ):
3357 node = str( main.activeNodes[i] + 1 )
3358 if isinstance( getResponses[ i ], list):
3359 current = set( getResponses[ i ] )
3360 if len( current ) == len( getResponses[ i ] ):
3361 # no repeats
3362 if onosSet != current:
3363 main.log.error( "ONOS" + node +
3364 " has incorrect view" +
3365 " of set " + onosSetName + ":\n" +
3366 str( getResponses[ i ] ) )
3367 main.log.debug( "Expected: " + str( onosSet ) )
3368 main.log.debug( "Actual: " + str( current ) )
3369 getResults = main.FALSE
3370 else:
3371 # error, set is not a set
3372 main.log.error( "ONOS" + node +
3373 " has repeat elements in" +
3374 " set " + onosSetName + ":\n" +
3375 str( getResponses[ i ] ) )
3376 getResults = main.FALSE
3377 elif getResponses[ i ] == main.ERROR:
3378 getResults = main.FALSE
3379 utilities.assert_equals( expect=main.TRUE,
3380 actual=getResults,
3381 onpass="Set elements are correct",
3382 onfail="Set elements are incorrect" )
3383
3384 main.step( "Distributed Set size" )
3385 sizeResponses = []
3386 threads = []
3387 for i in main.activeNodes:
3388 t = main.Thread( target=main.CLIs[i].setTestSize,
3389 name="setTestSize-" + str( i ),
3390 args=[ onosSetName ] )
3391 threads.append( t )
3392 t.start()
3393 for t in threads:
3394 t.join()
3395 sizeResponses.append( t.result )
3396
3397 sizeResults = main.TRUE
3398 for i in range( len( main.activeNodes ) ):
3399 node = str( main.activeNodes[i] + 1 )
3400 if size != sizeResponses[ i ]:
3401 sizeResults = main.FALSE
3402 main.log.error( "ONOS" + node +
3403 " expected a size of " + str( size ) +
3404 " for set " + onosSetName +
3405 " but got " + str( sizeResponses[ i ] ) )
3406 utilities.assert_equals( expect=main.TRUE,
3407 actual=sizeResults,
3408 onpass="Set sizes are correct",
3409 onfail="Set sizes are incorrect" )
3410
3411 main.step( "Distributed Set add()" )
3412 onosSet.add( addValue )
3413 addResponses = []
3414 threads = []
3415 for i in main.activeNodes:
3416 t = main.Thread( target=main.CLIs[i].setTestAdd,
3417 name="setTestAdd-" + str( i ),
3418 args=[ onosSetName, addValue ] )
3419 threads.append( t )
3420 t.start()
3421 for t in threads:
3422 t.join()
3423 addResponses.append( t.result )
3424
3425 # main.TRUE = successfully changed the set
3426 # main.FALSE = action resulted in no change in set
3427 # main.ERROR - Some error in executing the function
3428 addResults = main.TRUE
3429 for i in range( len( main.activeNodes ) ):
3430 if addResponses[ i ] == main.TRUE:
3431 # All is well
3432 pass
3433 elif addResponses[ i ] == main.FALSE:
3434 # Already in set, probably fine
3435 pass
3436 elif addResponses[ i ] == main.ERROR:
3437 # Error in execution
3438 addResults = main.FALSE
3439 else:
3440 # unexpected result
3441 addResults = main.FALSE
3442 if addResults != main.TRUE:
3443 main.log.error( "Error executing set add" )
3444
3445 # Check if set is still correct
3446 size = len( onosSet )
3447 getResponses = []
3448 threads = []
3449 for i in main.activeNodes:
3450 t = main.Thread( target=main.CLIs[i].setTestGet,
3451 name="setTestGet-" + str( i ),
3452 args=[ onosSetName ] )
3453 threads.append( t )
3454 t.start()
3455 for t in threads:
3456 t.join()
3457 getResponses.append( t.result )
3458 getResults = main.TRUE
3459 for i in range( len( main.activeNodes ) ):
3460 node = str( main.activeNodes[i] + 1 )
3461 if isinstance( getResponses[ i ], list):
3462 current = set( getResponses[ i ] )
3463 if len( current ) == len( getResponses[ i ] ):
3464 # no repeats
3465 if onosSet != current:
3466 main.log.error( "ONOS" + node + " has incorrect view" +
3467 " of set " + onosSetName + ":\n" +
3468 str( getResponses[ i ] ) )
3469 main.log.debug( "Expected: " + str( onosSet ) )
3470 main.log.debug( "Actual: " + str( current ) )
3471 getResults = main.FALSE
3472 else:
3473 # error, set is not a set
3474 main.log.error( "ONOS" + node + " has repeat elements in" +
3475 " set " + onosSetName + ":\n" +
3476 str( getResponses[ i ] ) )
3477 getResults = main.FALSE
3478 elif getResponses[ i ] == main.ERROR:
3479 getResults = main.FALSE
3480 sizeResponses = []
3481 threads = []
3482 for i in main.activeNodes:
3483 t = main.Thread( target=main.CLIs[i].setTestSize,
3484 name="setTestSize-" + str( i ),
3485 args=[ onosSetName ] )
3486 threads.append( t )
3487 t.start()
3488 for t in threads:
3489 t.join()
3490 sizeResponses.append( t.result )
3491 sizeResults = main.TRUE
3492 for i in range( len( main.activeNodes ) ):
3493 node = str( main.activeNodes[i] + 1 )
3494 if size != sizeResponses[ i ]:
3495 sizeResults = main.FALSE
3496 main.log.error( "ONOS" + node +
3497 " expected a size of " + str( size ) +
3498 " for set " + onosSetName +
3499 " but got " + str( sizeResponses[ i ] ) )
3500 addResults = addResults and getResults and sizeResults
3501 utilities.assert_equals( expect=main.TRUE,
3502 actual=addResults,
3503 onpass="Set add correct",
3504 onfail="Set add was incorrect" )
3505
3506 main.step( "Distributed Set addAll()" )
3507 onosSet.update( addAllValue.split() )
3508 addResponses = []
3509 threads = []
3510 for i in main.activeNodes:
3511 t = main.Thread( target=main.CLIs[i].setTestAdd,
3512 name="setTestAddAll-" + str( i ),
3513 args=[ onosSetName, addAllValue ] )
3514 threads.append( t )
3515 t.start()
3516 for t in threads:
3517 t.join()
3518 addResponses.append( t.result )
3519
3520 # main.TRUE = successfully changed the set
3521 # main.FALSE = action resulted in no change in set
3522 # main.ERROR - Some error in executing the function
3523 addAllResults = main.TRUE
3524 for i in range( len( main.activeNodes ) ):
3525 if addResponses[ i ] == main.TRUE:
3526 # All is well
3527 pass
3528 elif addResponses[ i ] == main.FALSE:
3529 # Already in set, probably fine
3530 pass
3531 elif addResponses[ i ] == main.ERROR:
3532 # Error in execution
3533 addAllResults = main.FALSE
3534 else:
3535 # unexpected result
3536 addAllResults = main.FALSE
3537 if addAllResults != main.TRUE:
3538 main.log.error( "Error executing set addAll" )
3539
3540 # Check if set is still correct
3541 size = len( onosSet )
3542 getResponses = []
3543 threads = []
3544 for i in main.activeNodes:
3545 t = main.Thread( target=main.CLIs[i].setTestGet,
3546 name="setTestGet-" + str( i ),
3547 args=[ onosSetName ] )
3548 threads.append( t )
3549 t.start()
3550 for t in threads:
3551 t.join()
3552 getResponses.append( t.result )
3553 getResults = main.TRUE
3554 for i in range( len( main.activeNodes ) ):
3555 node = str( main.activeNodes[i] + 1 )
3556 if isinstance( getResponses[ i ], list):
3557 current = set( getResponses[ i ] )
3558 if len( current ) == len( getResponses[ i ] ):
3559 # no repeats
3560 if onosSet != current:
3561 main.log.error( "ONOS" + node +
3562 " has incorrect view" +
3563 " of set " + onosSetName + ":\n" +
3564 str( getResponses[ i ] ) )
3565 main.log.debug( "Expected: " + str( onosSet ) )
3566 main.log.debug( "Actual: " + str( current ) )
3567 getResults = main.FALSE
3568 else:
3569 # error, set is not a set
3570 main.log.error( "ONOS" + node +
3571 " has repeat elements in" +
3572 " set " + onosSetName + ":\n" +
3573 str( getResponses[ i ] ) )
3574 getResults = main.FALSE
3575 elif getResponses[ i ] == main.ERROR:
3576 getResults = main.FALSE
3577 sizeResponses = []
3578 threads = []
3579 for i in main.activeNodes:
3580 t = main.Thread( target=main.CLIs[i].setTestSize,
3581 name="setTestSize-" + str( i ),
3582 args=[ onosSetName ] )
3583 threads.append( t )
3584 t.start()
3585 for t in threads:
3586 t.join()
3587 sizeResponses.append( t.result )
3588 sizeResults = main.TRUE
3589 for i in range( len( main.activeNodes ) ):
3590 node = str( main.activeNodes[i] + 1 )
3591 if size != sizeResponses[ i ]:
3592 sizeResults = main.FALSE
3593 main.log.error( "ONOS" + node +
3594 " expected a size of " + str( size ) +
3595 " for set " + onosSetName +
3596 " but got " + str( sizeResponses[ i ] ) )
3597 addAllResults = addAllResults and getResults and sizeResults
3598 utilities.assert_equals( expect=main.TRUE,
3599 actual=addAllResults,
3600 onpass="Set addAll correct",
3601 onfail="Set addAll was incorrect" )
3602
3603 main.step( "Distributed Set contains()" )
3604 containsResponses = []
3605 threads = []
3606 for i in main.activeNodes:
3607 t = main.Thread( target=main.CLIs[i].setTestGet,
3608 name="setContains-" + str( i ),
3609 args=[ onosSetName ],
3610 kwargs={ "values": addValue } )
3611 threads.append( t )
3612 t.start()
3613 for t in threads:
3614 t.join()
3615 # NOTE: This is the tuple
3616 containsResponses.append( t.result )
3617
3618 containsResults = main.TRUE
3619 for i in range( len( main.activeNodes ) ):
3620 if containsResponses[ i ] == main.ERROR:
3621 containsResults = main.FALSE
3622 else:
3623 containsResults = containsResults and\
3624 containsResponses[ i ][ 1 ]
3625 utilities.assert_equals( expect=main.TRUE,
3626 actual=containsResults,
3627 onpass="Set contains is functional",
3628 onfail="Set contains failed" )
3629
3630 main.step( "Distributed Set containsAll()" )
3631 containsAllResponses = []
3632 threads = []
3633 for i in main.activeNodes:
3634 t = main.Thread( target=main.CLIs[i].setTestGet,
3635 name="setContainsAll-" + str( i ),
3636 args=[ onosSetName ],
3637 kwargs={ "values": addAllValue } )
3638 threads.append( t )
3639 t.start()
3640 for t in threads:
3641 t.join()
3642 # NOTE: This is the tuple
3643 containsAllResponses.append( t.result )
3644
3645 containsAllResults = main.TRUE
3646 for i in range( len( main.activeNodes ) ):
3647 if containsResponses[ i ] == main.ERROR:
3648 containsResults = main.FALSE
3649 else:
3650 containsResults = containsResults and\
3651 containsResponses[ i ][ 1 ]
3652 utilities.assert_equals( expect=main.TRUE,
3653 actual=containsAllResults,
3654 onpass="Set containsAll is functional",
3655 onfail="Set containsAll failed" )
3656
        # Remove one value from the distributed set via every active node in
        # parallel, then verify each node's view and reported size against the
        # local reference set ( onosSet ).
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        # Fan out the remove command to all active ONOS CLIs concurrently
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based node label used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3753
        # Remove several values at once ( removeAll ) via every active node in
        # parallel, then verify each node's view and size against onosSet.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based node label for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3853
        # Add several values at once ( addAll ) via every active node in
        # parallel, then verify each node's view and size against onosSet.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based node label for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3950
        # Clear the distributed set via every active node in parallel
        # ( setTestRemove with clear=True ), then verify the set is empty on
        # every node.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Value doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based node label for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node reports a size of zero
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4048
        # Re-populate the set after the clear() step with another addAll via
        # every active node in parallel, then verify views and sizes again.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based node label for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4145
        # Retain only the values in retainValue ( setTestRemove with
        # retain=True, i.e. set intersection ) via every active node in
        # parallel, then verify views and sizes against onosSet.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based node label for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4242
4243 # Transactional maps
4244 main.step( "Partitioned Transactional maps put" )
4245 tMapValue = "Testing"
4246 numKeys = 100
4247 putResult = True
4248 node = main.activeNodes[0]
4249 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4250 if putResponses and len( putResponses ) == 100:
4251 for i in putResponses:
4252 if putResponses[ i ][ 'value' ] != tMapValue:
4253 putResult = False
4254 else:
4255 putResult = False
4256 if not putResult:
4257 main.log.debug( "Put response values: " + str( putResponses ) )
4258 utilities.assert_equals( expect=True,
4259 actual=putResult,
4260 onpass="Partitioned Transactional Map put successful",
4261 onfail="Partitioned Transactional Map put values are incorrect" )
4262
        # Read back every key from the transactional map on all active nodes
        # in parallel and check each node returns the value written above.
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # Every node should return the same value for this key
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )