blob: 8cf10264414a8daed140f261db075b8f8bda618a [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    dynamic scaling of the cluster size.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The scaling case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
25
26
27class HAscaling:
28
    def __init__( self ):
        # TestON creates one HAscaling instance per test run. No state is
        # shared between cases through this object (cases communicate via
        # attributes on `main`), so only the framework-expected 'default'
        # attribute is initialized.
        self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAscaling.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
133 port = 8000
134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 main.scaling = main.params['scaling'].split( "," )
146 main.log.debug( main.scaling )
147 scale = main.scaling.pop(0)
148 main.log.debug( scale)
149 if "e" in scale:
150 equal = True
151 else:
152 equal = False
153 main.log.debug( equal)
154 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
155 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
156 utilities.assert_equals( expect=main.TRUE, actual=genResult,
157 onpass="New cluster metadata file generated",
158 onfail="Failled to generate new metadata file" )
159
160 cleanInstallResult = main.TRUE
161 gitPullResult = main.TRUE
162
163 main.step( "Starting Mininet" )
164 # scp topo file to mininet
165 # TODO: move to params?
166 topoName = "obelisk.py"
167 filePath = main.ONOSbench.home + "/tools/test/topos/"
168 main.ONOSbench.scp( main.Mininet1,
169 filePath + topoName,
170 main.Mininet1.home,
171 direction="to" )
172 mnResult = main.Mininet1.startNet( )
173 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
174 onpass="Mininet Started",
175 onfail="Error starting Mininet" )
176
177 main.step( "Git checkout and pull " + gitBranch )
178 if PULLCODE:
179 main.ONOSbench.gitCheckout( gitBranch )
180 gitPullResult = main.ONOSbench.gitPull()
181 # values of 1 or 3 are good
182 utilities.assert_lesser( expect=0, actual=gitPullResult,
183 onpass="Git pull successful",
184 onfail="Git pull failed" )
185 main.ONOSbench.getVersion( report=True )
186
187 main.step( "Using mvn clean install" )
188 cleanInstallResult = main.TRUE
189 if PULLCODE and gitPullResult == main.TRUE:
190 cleanInstallResult = main.ONOSbench.cleanInstall()
191 else:
192 main.log.warn( "Did not pull new code so skipping mvn " +
193 "clean install" )
194 utilities.assert_equals( expect=main.TRUE,
195 actual=cleanInstallResult,
196 onpass="MCI successful",
197 onfail="MCI failed" )
198 # GRAPHS
199 # NOTE: important params here:
200 # job = name of Jenkins job
201 # Plot Name = Plot-HA, only can be used if multiple plots
202 # index = The number of the graph under plot name
203 job = "HAscaling"
204 plotName = "Plot-HA"
205 index = "0"
206 graphs = '<ac:structured-macro ac:name="html">\n'
207 graphs += '<ac:plain-text-body><![CDATA[\n'
208 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
209 '/plot/' + plotName + '/getPlot?index=' + index +\
210 '&width=500&height=300"' +\
211 'noborder="0" width="500" height="300" scrolling="yes" ' +\
212 'seamless="seamless"></iframe>\n'
213 graphs += ']]></ac:plain-text-body>\n'
214 graphs += '</ac:structured-macro>\n'
215 main.log.wiki(graphs)
216
217 main.step( "Copying backup config files" )
218 path = "~/onos/tools/package/bin/onos-service"
219 cp = main.ONOSbench.scp( main.ONOSbench,
220 path,
221 path + ".backup",
222 direction="to" )
223
224 utilities.assert_equals( expect=main.TRUE,
225 actual=cp,
226 onpass="Copy backup config file succeeded",
227 onfail="Copy backup config file failed" )
228 # we need to modify the onos-service file to use remote metadata file
229 # url for cluster metadata file
230 ip = main.ONOSbench.getIpAddr()
231 metaFile = "cluster.json"
232 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
233 main.log.warn( javaArgs )
234 main.log.warn( repr( javaArgs ) )
235 handle = main.ONOSbench.handle
236 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
237 main.log.warn( sed )
238 main.log.warn( repr( sed ) )
239 handle.sendline( sed )
240 handle.expect( "\$" )
241 main.log.debug( repr( handle.before ) )
242
243 main.step( "Creating ONOS package" )
244 packageResult = main.ONOSbench.onosPackage()
245 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
246 onpass="ONOS package successful",
247 onfail="ONOS package failed" )
248
249 main.step( "Installing ONOS package" )
250 onosInstallResult = main.TRUE
251 for i in range( main.ONOSbench.maxNodes ):
252 node = main.nodes[i]
253 options = "-f"
254 if i >= main.numCtrls:
255 options = "-nf" # Don't start more than the current scale
256 tmpResult = main.ONOSbench.onosInstall( options=options,
257 node=node.ip_address )
258 onosInstallResult = onosInstallResult and tmpResult
259 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
260 onpass="ONOS install successful",
261 onfail="ONOS install failed" )
262
263 # Cleanup custom onos-service file
264 main.ONOSbench.scp( main.ONOSbench,
265 path + ".backup",
266 path,
267 direction="to" )
268
269 main.step( "Checking if ONOS is up yet" )
270 for i in range( 2 ):
271 onosIsupResult = main.TRUE
272 for i in range( main.numCtrls ):
273 node = main.nodes[i]
274 started = main.ONOSbench.isup( node.ip_address )
275 if not started:
276 main.log.error( node.name + " hasn't started" )
277 onosIsupResult = onosIsupResult and started
278 if onosIsupResult == main.TRUE:
279 break
280 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
281 onpass="ONOS startup successful",
282 onfail="ONOS startup failed" )
283
284 main.log.step( "Starting ONOS CLI sessions" )
285 cliResults = main.TRUE
286 threads = []
287 for i in range( main.numCtrls ):
288 t = main.Thread( target=main.CLIs[i].startOnosCli,
289 name="startOnosCli-" + str( i ),
290 args=[main.nodes[i].ip_address] )
291 threads.append( t )
292 t.start()
293
294 for t in threads:
295 t.join()
296 cliResults = cliResults and t.result
297 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
298 onpass="ONOS cli startup successful",
299 onfail="ONOS cli startup failed" )
300
301 # Create a list of active nodes for use when some nodes are stopped
302 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
303
304 if main.params[ 'tcpdump' ].lower() == "true":
305 main.step( "Start Packet Capture MN" )
306 main.Mininet2.startTcpdump(
307 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
308 + "-MN.pcap",
309 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
310 port=main.params[ 'MNtcpdump' ][ 'port' ] )
311
312 main.step( "Checking ONOS nodes" )
313 nodeResults = utilities.retry( main.HA.nodesCheck,
314 False,
315 args=[main.activeNodes],
316 attempts=5 )
317 utilities.assert_equals( expect=True, actual=nodeResults,
318 onpass="Nodes check successful",
319 onfail="Nodes check NOT successful" )
320
321 if not nodeResults:
322 for cli in main.CLIs:
323 main.log.debug( "{} components not ACTIVE: \n{}".format(
324 cli.name,
325 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
326
327 if cliResults == main.FALSE:
328 main.log.error( "Failed to start ONOS, stopping test" )
329 main.cleanup()
330 main.exit()
331
332 main.step( "Activate apps defined in the params file" )
333 # get data from the params
334 apps = main.params.get( 'apps' )
335 if apps:
336 apps = apps.split(',')
337 main.log.warn( apps )
338 activateResult = True
339 for app in apps:
340 main.CLIs[ 0 ].app( app, "Activate" )
341 # TODO: check this worked
342 time.sleep( 10 ) # wait for apps to activate
343 for app in apps:
344 state = main.CLIs[ 0 ].appStatus( app )
345 if state == "ACTIVE":
346 activateResult = activateResult and True
347 else:
348 main.log.error( "{} is in {} state".format( app, state ) )
349 activateResult = False
350 utilities.assert_equals( expect=True,
351 actual=activateResult,
352 onpass="Successfully activated apps",
353 onfail="Failed to activate apps" )
354 else:
355 main.log.warn( "No apps were specified to be loaded after startup" )
356
357 main.step( "Set ONOS configurations" )
358 config = main.params.get( 'ONOS_Configuration' )
359 if config:
360 main.log.debug( config )
361 checkResult = main.TRUE
362 for component in config:
363 for setting in config[component]:
364 value = config[component][setting]
365 check = main.CLIs[ 0 ].setCfg( component, setting, value )
366 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
367 checkResult = check and checkResult
368 utilities.assert_equals( expect=main.TRUE,
369 actual=checkResult,
370 onpass="Successfully set config",
371 onfail="Failed to set config" )
372 else:
373 main.log.warn( "No configurations were specified to be changed after startup" )
374
375 main.step( "App Ids check" )
376 appCheck = main.TRUE
377 threads = []
378 for i in main.activeNodes:
379 t = main.Thread( target=main.CLIs[i].appToIDCheck,
380 name="appToIDCheck-" + str( i ),
381 args=[] )
382 threads.append( t )
383 t.start()
384
385 for t in threads:
386 t.join()
387 appCheck = appCheck and t.result
388 if appCheck != main.TRUE:
389 node = main.activeNodes[0]
390 main.log.warn( main.CLIs[node].apps() )
391 main.log.warn( main.CLIs[node].appIDs() )
392 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
393 onpass="App Ids seem to be correct",
394 onfail="Something is wrong with app Ids" )
395
396 def CASE2( self, main ):
397 """
398 Assign devices to controllers
399 """
400 import re
401 assert main.numCtrls, "main.numCtrls not defined"
402 assert main, "main not defined"
403 assert utilities.assert_equals, "utilities.assert_equals not defined"
404 assert main.CLIs, "main.CLIs not defined"
405 assert main.nodes, "main.nodes not defined"
406
407 main.case( "Assigning devices to controllers" )
408 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
409 "and check that an ONOS node becomes the " +\
410 "master of the device."
411 main.step( "Assign switches to controllers" )
412
413 ipList = []
414 for i in range( main.ONOSbench.maxNodes ):
415 ipList.append( main.nodes[ i ].ip_address )
416 swList = []
417 for i in range( 1, 29 ):
418 swList.append( "s" + str( i ) )
419 main.Mininet1.assignSwController( sw=swList, ip=ipList )
420
421 mastershipCheck = main.TRUE
422 for i in range( 1, 29 ):
423 response = main.Mininet1.getSwController( "s" + str( i ) )
424 try:
425 main.log.info( str( response ) )
426 except Exception:
427 main.log.info( repr( response ) )
428 for node in main.nodes:
429 if re.search( "tcp:" + node.ip_address, response ):
430 mastershipCheck = mastershipCheck and main.TRUE
431 else:
432 main.log.error( "Error, node " + node.ip_address + " is " +
433 "not in the list of controllers s" +
434 str( i ) + " is connecting to." )
435 mastershipCheck = main.FALSE
436 utilities.assert_equals(
437 expect=main.TRUE,
438 actual=mastershipCheck,
439 onpass="Switch mastership assigned correctly",
440 onfail="Switches not assigned correctly to controllers" )
441
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Uses the ONOS 'device-role' command to pin each of the 28 devices to
        a specific controller (index taken modulo the current cluster size),
        then re-reads each device's role to confirm the requested node
        actually became master.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls go through one active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluser, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = target controller index (mod numCtrls so smaller
                # clusters wrap around); the getDevice() argument is the
                # tail of the device DPID.
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # NOTE(review): if this branch is ever taken, deviceId/ip
                    # keep their values from the previous iteration before the
                    # assert below runs — confirm this is intended.
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() returning None (AttributeError on .get) or a missing
            # device id both land here; dump the device view for debugging.
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
556
557 def CASE3( self, main ):
558 """
559 Assign intents
560 """
561 import time
562 import json
563 assert main.numCtrls, "main.numCtrls not defined"
564 assert main, "main not defined"
565 assert utilities.assert_equals, "utilities.assert_equals not defined"
566 assert main.CLIs, "main.CLIs not defined"
567 assert main.nodes, "main.nodes not defined"
568 try:
569 labels
570 except NameError:
571 main.log.error( "labels not defined, setting to []" )
572 labels = []
573 try:
574 data
575 except NameError:
576 main.log.error( "data not defined, setting to []" )
577 data = []
578 # NOTE: we must reinstall intents until we have a persistant intent
579 # datastore!
580 main.case( "Adding host Intents" )
581 main.caseExplanation = "Discover hosts by using pingall then " +\
582 "assign predetermined host-to-host intents." +\
583 " After installation, check that the intent" +\
584 " is distributed to all nodes and the state" +\
585 " is INSTALLED"
586
587 # install onos-app-fwd
588 main.step( "Install reactive forwarding app" )
589 onosCli = main.CLIs[ main.activeNodes[0] ]
590 installResults = onosCli.activateApp( "org.onosproject.fwd" )
591 utilities.assert_equals( expect=main.TRUE, actual=installResults,
592 onpass="Install fwd successful",
593 onfail="Install fwd failed" )
594
595 main.step( "Check app ids" )
596 appCheck = main.TRUE
597 threads = []
598 for i in main.activeNodes:
599 t = main.Thread( target=main.CLIs[i].appToIDCheck,
600 name="appToIDCheck-" + str( i ),
601 args=[] )
602 threads.append( t )
603 t.start()
604
605 for t in threads:
606 t.join()
607 appCheck = appCheck and t.result
608 if appCheck != main.TRUE:
609 main.log.warn( onosCli.apps() )
610 main.log.warn( onosCli.appIDs() )
611 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
612 onpass="App Ids seem to be correct",
613 onfail="Something is wrong with app Ids" )
614
615 main.step( "Discovering Hosts( Via pingall for now )" )
616 # FIXME: Once we have a host discovery mechanism, use that instead
617 # REACTIVE FWD test
618 pingResult = main.FALSE
619 passMsg = "Reactive Pingall test passed"
620 time1 = time.time()
621 pingResult = main.Mininet1.pingall()
622 time2 = time.time()
623 if not pingResult:
624 main.log.warn("First pingall failed. Trying again...")
625 pingResult = main.Mininet1.pingall()
626 passMsg += " on the second try"
627 utilities.assert_equals(
628 expect=main.TRUE,
629 actual=pingResult,
630 onpass= passMsg,
631 onfail="Reactive Pingall failed, " +
632 "one or more ping pairs failed" )
633 main.log.info( "Time for pingall: %2f seconds" %
634 ( time2 - time1 ) )
635 # timeout for fwd flows
636 time.sleep( 11 )
637 # uninstall onos-app-fwd
638 main.step( "Uninstall reactive forwarding app" )
639 node = main.activeNodes[0]
640 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
641 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
642 onpass="Uninstall fwd successful",
643 onfail="Uninstall fwd failed" )
644
645 main.step( "Check app ids" )
646 threads = []
647 appCheck2 = main.TRUE
648 for i in main.activeNodes:
649 t = main.Thread( target=main.CLIs[i].appToIDCheck,
650 name="appToIDCheck-" + str( i ),
651 args=[] )
652 threads.append( t )
653 t.start()
654
655 for t in threads:
656 t.join()
657 appCheck2 = appCheck2 and t.result
658 if appCheck2 != main.TRUE:
659 node = main.activeNodes[0]
660 main.log.warn( main.CLIs[node].apps() )
661 main.log.warn( main.CLIs[node].appIDs() )
662 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
663 onpass="App Ids seem to be correct",
664 onfail="Something is wrong with app Ids" )
665
666 main.step( "Add host intents via cli" )
667 intentIds = []
668 # TODO: move the host numbers to params
669 # Maybe look at all the paths we ping?
670 intentAddResult = True
671 hostResult = main.TRUE
672 for i in range( 8, 18 ):
673 main.log.info( "Adding host intent between h" + str( i ) +
674 " and h" + str( i + 10 ) )
675 host1 = "00:00:00:00:00:" + \
676 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
677 host2 = "00:00:00:00:00:" + \
678 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
679 # NOTE: getHost can return None
680 host1Dict = onosCli.getHost( host1 )
681 host2Dict = onosCli.getHost( host2 )
682 host1Id = None
683 host2Id = None
684 if host1Dict and host2Dict:
685 host1Id = host1Dict.get( 'id', None )
686 host2Id = host2Dict.get( 'id', None )
687 if host1Id and host2Id:
688 nodeNum = ( i % len( main.activeNodes ) )
689 node = main.activeNodes[nodeNum]
690 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
691 if tmpId:
692 main.log.info( "Added intent with id: " + tmpId )
693 intentIds.append( tmpId )
694 else:
695 main.log.error( "addHostIntent returned: " +
696 repr( tmpId ) )
697 else:
698 main.log.error( "Error, getHost() failed for h" + str( i ) +
699 " and/or h" + str( i + 10 ) )
700 node = main.activeNodes[0]
701 hosts = main.CLIs[node].hosts()
702 main.log.warn( "Hosts output: " )
703 try:
704 main.log.warn( json.dumps( json.loads( hosts ),
705 sort_keys=True,
706 indent=4,
707 separators=( ',', ': ' ) ) )
708 except ( ValueError, TypeError ):
709 main.log.warn( repr( hosts ) )
710 hostResult = main.FALSE
711 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
712 onpass="Found a host id for each host",
713 onfail="Error looking up host ids" )
714
715 intentStart = time.time()
716 onosIds = onosCli.getAllIntentsId()
717 main.log.info( "Submitted intents: " + str( intentIds ) )
718 main.log.info( "Intents in ONOS: " + str( onosIds ) )
719 for intent in intentIds:
720 if intent in onosIds:
721 pass # intent submitted is in onos
722 else:
723 intentAddResult = False
724 if intentAddResult:
725 intentStop = time.time()
726 else:
727 intentStop = None
728 # Print the intent states
729 intents = onosCli.intents()
730 intentStates = []
731 installedCheck = True
732 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
733 count = 0
734 try:
735 for intent in json.loads( intents ):
736 state = intent.get( 'state', None )
737 if "INSTALLED" not in state:
738 installedCheck = False
739 intentId = intent.get( 'id', None )
740 intentStates.append( ( intentId, state ) )
741 except ( ValueError, TypeError ):
742 main.log.exception( "Error parsing intents" )
743 # add submitted intents not in the store
744 tmplist = [ i for i, s in intentStates ]
745 missingIntents = False
746 for i in intentIds:
747 if i not in tmplist:
748 intentStates.append( ( i, " - " ) )
749 missingIntents = True
750 intentStates.sort()
751 for i, s in intentStates:
752 count += 1
753 main.log.info( "%-6s%-15s%-15s" %
754 ( str( count ), str( i ), str( s ) ) )
755 leaders = onosCli.leaders()
756 try:
757 missing = False
758 if leaders:
759 parsedLeaders = json.loads( leaders )
760 main.log.warn( json.dumps( parsedLeaders,
761 sort_keys=True,
762 indent=4,
763 separators=( ',', ': ' ) ) )
764 # check for all intent partitions
765 topics = []
766 for i in range( 14 ):
767 topics.append( "intent-partition-" + str( i ) )
768 main.log.debug( topics )
769 ONOStopics = [ j['topic'] for j in parsedLeaders ]
770 for topic in topics:
771 if topic not in ONOStopics:
772 main.log.error( "Error: " + topic +
773 " not in leaders" )
774 missing = True
775 else:
776 main.log.error( "leaders() returned None" )
777 except ( ValueError, TypeError ):
778 main.log.exception( "Error parsing leaders" )
779 main.log.error( repr( leaders ) )
780 # Check all nodes
781 if missing:
782 for i in main.activeNodes:
783 response = main.CLIs[i].leaders( jsonFormat=False)
784 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
785 str( response ) )
786
787 partitions = onosCli.partitions()
788 try:
789 if partitions :
790 parsedPartitions = json.loads( partitions )
791 main.log.warn( json.dumps( parsedPartitions,
792 sort_keys=True,
793 indent=4,
794 separators=( ',', ': ' ) ) )
795 # TODO check for a leader in all paritions
796 # TODO check for consistency among nodes
797 else:
798 main.log.error( "partitions() returned None" )
799 except ( ValueError, TypeError ):
800 main.log.exception( "Error parsing partitions" )
801 main.log.error( repr( partitions ) )
802 pendingMap = onosCli.pendingMap()
803 try:
804 if pendingMap :
805 parsedPending = json.loads( pendingMap )
806 main.log.warn( json.dumps( parsedPending,
807 sort_keys=True,
808 indent=4,
809 separators=( ',', ': ' ) ) )
810 # TODO check something here?
811 else:
812 main.log.error( "pendingMap() returned None" )
813 except ( ValueError, TypeError ):
814 main.log.exception( "Error parsing pending map" )
815 main.log.error( repr( pendingMap ) )
816
817 intentAddResult = bool( intentAddResult and not missingIntents and
818 installedCheck )
819 if not intentAddResult:
820 main.log.error( "Error in pushing host intents to ONOS" )
821
822 main.step( "Intent Anti-Entropy dispersion" )
823 for j in range(100):
824 correct = True
825 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
826 for i in main.activeNodes:
827 onosIds = []
828 ids = main.CLIs[i].getAllIntentsId()
829 onosIds.append( ids )
830 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
831 str( sorted( onosIds ) ) )
832 if sorted( ids ) != sorted( intentIds ):
833 main.log.warn( "Set of intent IDs doesn't match" )
834 correct = False
835 break
836 else:
837 intents = json.loads( main.CLIs[i].intents() )
838 for intent in intents:
839 if intent[ 'state' ] != "INSTALLED":
840 main.log.warn( "Intent " + intent[ 'id' ] +
841 " is " + intent[ 'state' ] )
842 correct = False
843 break
844 if correct:
845 break
846 else:
847 time.sleep(1)
848 if not intentStop:
849 intentStop = time.time()
850 global gossipTime
851 gossipTime = intentStop - intentStart
852 main.log.info( "It took about " + str( gossipTime ) +
853 " seconds for all intents to appear in each node" )
854 append = False
855 title = "Gossip Intents"
856 count = 1
857 while append is False:
858 curTitle = title + str( count )
859 if curTitle not in labels:
860 labels.append( curTitle )
861 data.append( str( gossipTime ) )
862 append = True
863 else:
864 count += 1
865 gossipPeriod = int( main.params['timers']['gossip'] )
866 maxGossipTime = gossipPeriod * len( main.activeNodes )
867 utilities.assert_greater_equals(
868 expect=maxGossipTime, actual=gossipTime,
869 onpass="ECM anti-entropy for intents worked within " +
870 "expected time",
871 onfail="Intent ECM anti-entropy took too long. " +
872 "Expected time:{}, Actual time:{}".format( maxGossipTime,
873 gossipTime ) )
874 if gossipTime <= maxGossipTime:
875 intentAddResult = True
876
877 if not intentAddResult or "key" in pendingMap:
878 import time
879 installedCheck = True
880 main.log.info( "Sleeping 60 seconds to see if intents are found" )
881 time.sleep( 60 )
882 onosIds = onosCli.getAllIntentsId()
883 main.log.info( "Submitted intents: " + str( intentIds ) )
884 main.log.info( "Intents in ONOS: " + str( onosIds ) )
885 # Print the intent states
886 intents = onosCli.intents()
887 intentStates = []
888 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
889 count = 0
890 try:
891 for intent in json.loads( intents ):
892 # Iter through intents of a node
893 state = intent.get( 'state', None )
894 if "INSTALLED" not in state:
895 installedCheck = False
896 intentId = intent.get( 'id', None )
897 intentStates.append( ( intentId, state ) )
898 except ( ValueError, TypeError ):
899 main.log.exception( "Error parsing intents" )
900 # add submitted intents not in the store
901 tmplist = [ i for i, s in intentStates ]
902 for i in intentIds:
903 if i not in tmplist:
904 intentStates.append( ( i, " - " ) )
905 intentStates.sort()
906 for i, s in intentStates:
907 count += 1
908 main.log.info( "%-6s%-15s%-15s" %
909 ( str( count ), str( i ), str( s ) ) )
910 leaders = onosCli.leaders()
911 try:
912 missing = False
913 if leaders:
914 parsedLeaders = json.loads( leaders )
915 main.log.warn( json.dumps( parsedLeaders,
916 sort_keys=True,
917 indent=4,
918 separators=( ',', ': ' ) ) )
919 # check for all intent partitions
920 # check for election
921 topics = []
922 for i in range( 14 ):
923 topics.append( "intent-partition-" + str( i ) )
924 # FIXME: this should only be after we start the app
925 topics.append( "org.onosproject.election" )
926 main.log.debug( topics )
927 ONOStopics = [ j['topic'] for j in parsedLeaders ]
928 for topic in topics:
929 if topic not in ONOStopics:
930 main.log.error( "Error: " + topic +
931 " not in leaders" )
932 missing = True
933 else:
934 main.log.error( "leaders() returned None" )
935 except ( ValueError, TypeError ):
936 main.log.exception( "Error parsing leaders" )
937 main.log.error( repr( leaders ) )
938 # Check all nodes
939 if missing:
940 for i in main.activeNodes:
941 node = main.CLIs[i]
942 response = node.leaders( jsonFormat=False)
943 main.log.warn( str( node.name ) + " leaders output: \n" +
944 str( response ) )
945
946 partitions = onosCli.partitions()
947 try:
948 if partitions :
949 parsedPartitions = json.loads( partitions )
950 main.log.warn( json.dumps( parsedPartitions,
951 sort_keys=True,
952 indent=4,
953 separators=( ',', ': ' ) ) )
954 # TODO check for a leader in all paritions
955 # TODO check for consistency among nodes
956 else:
957 main.log.error( "partitions() returned None" )
958 except ( ValueError, TypeError ):
959 main.log.exception( "Error parsing partitions" )
960 main.log.error( repr( partitions ) )
961 pendingMap = onosCli.pendingMap()
962 try:
963 if pendingMap :
964 parsedPending = json.loads( pendingMap )
965 main.log.warn( json.dumps( parsedPending,
966 sort_keys=True,
967 indent=4,
968 separators=( ',', ': ' ) ) )
969 # TODO check something here?
970 else:
971 main.log.error( "pendingMap() returned None" )
972 except ( ValueError, TypeError ):
973 main.log.exception( "Error parsing pending map" )
974 main.log.error( repr( pendingMap ) )
975
976 def CASE4( self, main ):
977 """
978 Ping across added host intents
979 """
980 import json
981 import time
982 assert main.numCtrls, "main.numCtrls not defined"
983 assert main, "main not defined"
984 assert utilities.assert_equals, "utilities.assert_equals not defined"
985 assert main.CLIs, "main.CLIs not defined"
986 assert main.nodes, "main.nodes not defined"
987 main.case( "Verify connectivity by sending traffic across Intents" )
988 main.caseExplanation = "Ping across added host intents to check " +\
989 "functionality and check the state of " +\
990 "the intent"
991
992 onosCli = main.CLIs[ main.activeNodes[0] ]
993 main.step( "Check Intent state" )
994 installedCheck = False
995 loopCount = 0
996 while not installedCheck and loopCount < 40:
997 installedCheck = True
998 # Print the intent states
999 intents = onosCli.intents()
1000 intentStates = []
1001 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1002 count = 0
1003 # Iter through intents of a node
1004 try:
1005 for intent in json.loads( intents ):
1006 state = intent.get( 'state', None )
1007 if "INSTALLED" not in state:
1008 installedCheck = False
1009 intentId = intent.get( 'id', None )
1010 intentStates.append( ( intentId, state ) )
1011 except ( ValueError, TypeError ):
1012 main.log.exception( "Error parsing intents." )
1013 # Print states
1014 intentStates.sort()
1015 for i, s in intentStates:
1016 count += 1
1017 main.log.info( "%-6s%-15s%-15s" %
1018 ( str( count ), str( i ), str( s ) ) )
1019 if not installedCheck:
1020 time.sleep( 1 )
1021 loopCount += 1
1022 utilities.assert_equals( expect=True, actual=installedCheck,
1023 onpass="Intents are all INSTALLED",
1024 onfail="Intents are not all in " +
1025 "INSTALLED state" )
1026
1027 main.step( "Ping across added host intents" )
1028 PingResult = main.TRUE
1029 for i in range( 8, 18 ):
1030 ping = main.Mininet1.pingHost( src="h" + str( i ),
1031 target="h" + str( i + 10 ) )
1032 PingResult = PingResult and ping
1033 if ping == main.FALSE:
1034 main.log.warn( "Ping failed between h" + str( i ) +
1035 " and h" + str( i + 10 ) )
1036 elif ping == main.TRUE:
1037 main.log.info( "Ping test passed!" )
1038 # Don't set PingResult or you'd override failures
1039 if PingResult == main.FALSE:
1040 main.log.error(
1041 "Intents have not been installed correctly, pings failed." )
1042 # TODO: pretty print
1043 main.log.warn( "ONOS1 intents: " )
1044 try:
1045 tmpIntents = onosCli.intents()
1046 main.log.warn( json.dumps( json.loads( tmpIntents ),
1047 sort_keys=True,
1048 indent=4,
1049 separators=( ',', ': ' ) ) )
1050 except ( ValueError, TypeError ):
1051 main.log.warn( repr( tmpIntents ) )
1052 utilities.assert_equals(
1053 expect=main.TRUE,
1054 actual=PingResult,
1055 onpass="Intents have been installed correctly and pings work",
1056 onfail="Intents have not been installed correctly, pings failed." )
1057
1058 main.step( "Check leadership of topics" )
1059 leaders = onosCli.leaders()
1060 topicCheck = main.TRUE
1061 try:
1062 if leaders:
1063 parsedLeaders = json.loads( leaders )
1064 main.log.warn( json.dumps( parsedLeaders,
1065 sort_keys=True,
1066 indent=4,
1067 separators=( ',', ': ' ) ) )
1068 # check for all intent partitions
1069 # check for election
1070 # TODO: Look at Devices as topics now that it uses this system
1071 topics = []
1072 for i in range( 14 ):
1073 topics.append( "intent-partition-" + str( i ) )
1074 # FIXME: this should only be after we start the app
1075 # FIXME: topics.append( "org.onosproject.election" )
1076 # Print leaders output
1077 main.log.debug( topics )
1078 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1079 for topic in topics:
1080 if topic not in ONOStopics:
1081 main.log.error( "Error: " + topic +
1082 " not in leaders" )
1083 topicCheck = main.FALSE
1084 else:
1085 main.log.error( "leaders() returned None" )
1086 topicCheck = main.FALSE
1087 except ( ValueError, TypeError ):
1088 topicCheck = main.FALSE
1089 main.log.exception( "Error parsing leaders" )
1090 main.log.error( repr( leaders ) )
1091 # TODO: Check for a leader of these topics
1092 # Check all nodes
1093 if topicCheck:
1094 for i in main.activeNodes:
1095 node = main.CLIs[i]
1096 response = node.leaders( jsonFormat=False)
1097 main.log.warn( str( node.name ) + " leaders output: \n" +
1098 str( response ) )
1099
1100 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1101 onpass="intent Partitions is in leaders",
1102 onfail="Some topics were lost " )
1103 # Print partitions
1104 partitions = onosCli.partitions()
1105 try:
1106 if partitions :
1107 parsedPartitions = json.loads( partitions )
1108 main.log.warn( json.dumps( parsedPartitions,
1109 sort_keys=True,
1110 indent=4,
1111 separators=( ',', ': ' ) ) )
1112 # TODO check for a leader in all paritions
1113 # TODO check for consistency among nodes
1114 else:
1115 main.log.error( "partitions() returned None" )
1116 except ( ValueError, TypeError ):
1117 main.log.exception( "Error parsing partitions" )
1118 main.log.error( repr( partitions ) )
1119 # Print Pending Map
1120 pendingMap = onosCli.pendingMap()
1121 try:
1122 if pendingMap :
1123 parsedPending = json.loads( pendingMap )
1124 main.log.warn( json.dumps( parsedPending,
1125 sort_keys=True,
1126 indent=4,
1127 separators=( ',', ': ' ) ) )
1128 # TODO check something here?
1129 else:
1130 main.log.error( "pendingMap() returned None" )
1131 except ( ValueError, TypeError ):
1132 main.log.exception( "Error parsing pending map" )
1133 main.log.error( repr( pendingMap ) )
1134
1135 if not installedCheck:
1136 main.log.info( "Waiting 60 seconds to see if the state of " +
1137 "intents change" )
1138 time.sleep( 60 )
1139 # Print the intent states
1140 intents = onosCli.intents()
1141 intentStates = []
1142 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1143 count = 0
1144 # Iter through intents of a node
1145 try:
1146 for intent in json.loads( intents ):
1147 state = intent.get( 'state', None )
1148 if "INSTALLED" not in state:
1149 installedCheck = False
1150 intentId = intent.get( 'id', None )
1151 intentStates.append( ( intentId, state ) )
1152 except ( ValueError, TypeError ):
1153 main.log.exception( "Error parsing intents." )
1154 intentStates.sort()
1155 for i, s in intentStates:
1156 count += 1
1157 main.log.info( "%-6s%-15s%-15s" %
1158 ( str( count ), str( i ), str( s ) ) )
1159 leaders = onosCli.leaders()
1160 try:
1161 missing = False
1162 if leaders:
1163 parsedLeaders = json.loads( leaders )
1164 main.log.warn( json.dumps( parsedLeaders,
1165 sort_keys=True,
1166 indent=4,
1167 separators=( ',', ': ' ) ) )
1168 # check for all intent partitions
1169 # check for election
1170 topics = []
1171 for i in range( 14 ):
1172 topics.append( "intent-partition-" + str( i ) )
1173 # FIXME: this should only be after we start the app
1174 topics.append( "org.onosproject.election" )
1175 main.log.debug( topics )
1176 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1177 for topic in topics:
1178 if topic not in ONOStopics:
1179 main.log.error( "Error: " + topic +
1180 " not in leaders" )
1181 missing = True
1182 else:
1183 main.log.error( "leaders() returned None" )
1184 except ( ValueError, TypeError ):
1185 main.log.exception( "Error parsing leaders" )
1186 main.log.error( repr( leaders ) )
1187 if missing:
1188 for i in main.activeNodes:
1189 node = main.CLIs[i]
1190 response = node.leaders( jsonFormat=False)
1191 main.log.warn( str( node.name ) + " leaders output: \n" +
1192 str( response ) )
1193
1194 partitions = onosCli.partitions()
1195 try:
1196 if partitions :
1197 parsedPartitions = json.loads( partitions )
1198 main.log.warn( json.dumps( parsedPartitions,
1199 sort_keys=True,
1200 indent=4,
1201 separators=( ',', ': ' ) ) )
1202 # TODO check for a leader in all paritions
1203 # TODO check for consistency among nodes
1204 else:
1205 main.log.error( "partitions() returned None" )
1206 except ( ValueError, TypeError ):
1207 main.log.exception( "Error parsing partitions" )
1208 main.log.error( repr( partitions ) )
1209 pendingMap = onosCli.pendingMap()
1210 try:
1211 if pendingMap :
1212 parsedPending = json.loads( pendingMap )
1213 main.log.warn( json.dumps( parsedPending,
1214 sort_keys=True,
1215 indent=4,
1216 separators=( ',', ': ' ) ) )
1217 # TODO check something here?
1218 else:
1219 main.log.error( "pendingMap() returned None" )
1220 except ( ValueError, TypeError ):
1221 main.log.exception( "Error parsing pending map" )
1222 main.log.error( repr( pendingMap ) )
1223 # Print flowrules
1224 node = main.activeNodes[0]
1225 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
1226 main.step( "Wait a minute then ping again" )
1227 # the wait is above
1228 PingResult = main.TRUE
1229 for i in range( 8, 18 ):
1230 ping = main.Mininet1.pingHost( src="h" + str( i ),
1231 target="h" + str( i + 10 ) )
1232 PingResult = PingResult and ping
1233 if ping == main.FALSE:
1234 main.log.warn( "Ping failed between h" + str( i ) +
1235 " and h" + str( i + 10 ) )
1236 elif ping == main.TRUE:
1237 main.log.info( "Ping test passed!" )
1238 # Don't set PingResult or you'd override failures
1239 if PingResult == main.FALSE:
1240 main.log.error(
1241 "Intents have not been installed correctly, pings failed." )
1242 # TODO: pretty print
1243 main.log.warn( "ONOS1 intents: " )
1244 try:
1245 tmpIntents = onosCli.intents()
1246 main.log.warn( json.dumps( json.loads( tmpIntents ),
1247 sort_keys=True,
1248 indent=4,
1249 separators=( ',', ': ' ) ) )
1250 except ( ValueError, TypeError ):
1251 main.log.warn( repr( tmpIntents ) )
1252 utilities.assert_equals(
1253 expect=main.TRUE,
1254 actual=PingResult,
1255 onpass="Intents have been installed correctly and pings work",
1256 onfail="Intents have not been installed correctly, pings failed." )
1257
1258 def CASE5( self, main ):
1259 """
1260 Reading state of ONOS
1261 """
1262 import json
1263 import time
1264 assert main.numCtrls, "main.numCtrls not defined"
1265 assert main, "main not defined"
1266 assert utilities.assert_equals, "utilities.assert_equals not defined"
1267 assert main.CLIs, "main.CLIs not defined"
1268 assert main.nodes, "main.nodes not defined"
1269
1270 main.case( "Setting up and gathering data for current state" )
1271 # The general idea for this test case is to pull the state of
1272 # ( intents,flows, topology,... ) from each ONOS node
1273 # We can then compare them with each other and also with past states
1274
1275 main.step( "Check that each switch has a master" )
1276 global mastershipState
1277 mastershipState = '[]'
1278
1279 # Assert that each device has a master
1280 rolesNotNull = main.TRUE
1281 threads = []
1282 for i in main.activeNodes:
1283 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1284 name="rolesNotNull-" + str( i ),
1285 args=[] )
1286 threads.append( t )
1287 t.start()
1288
1289 for t in threads:
1290 t.join()
1291 rolesNotNull = rolesNotNull and t.result
1292 utilities.assert_equals(
1293 expect=main.TRUE,
1294 actual=rolesNotNull,
1295 onpass="Each device has a master",
1296 onfail="Some devices don't have a master assigned" )
1297
1298 main.step( "Get the Mastership of each switch from each controller" )
1299 ONOSMastership = []
1300 consistentMastership = True
1301 rolesResults = True
1302 threads = []
1303 for i in main.activeNodes:
1304 t = main.Thread( target=main.CLIs[i].roles,
1305 name="roles-" + str( i ),
1306 args=[] )
1307 threads.append( t )
1308 t.start()
1309
1310 for t in threads:
1311 t.join()
1312 ONOSMastership.append( t.result )
1313
1314 for i in range( len( ONOSMastership ) ):
1315 node = str( main.activeNodes[i] + 1 )
1316 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1317 main.log.error( "Error in getting ONOS" + node + " roles" )
1318 main.log.warn( "ONOS" + node + " mastership response: " +
1319 repr( ONOSMastership[i] ) )
1320 rolesResults = False
1321 utilities.assert_equals(
1322 expect=True,
1323 actual=rolesResults,
1324 onpass="No error in reading roles output",
1325 onfail="Error in reading roles from ONOS" )
1326
1327 main.step( "Check for consistency in roles from each controller" )
1328 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1329 main.log.info(
1330 "Switch roles are consistent across all ONOS nodes" )
1331 else:
1332 consistentMastership = False
1333 utilities.assert_equals(
1334 expect=True,
1335 actual=consistentMastership,
1336 onpass="Switch roles are consistent across all ONOS nodes",
1337 onfail="ONOS nodes have different views of switch roles" )
1338
1339 if rolesResults and not consistentMastership:
1340 for i in range( len( main.activeNodes ) ):
1341 node = str( main.activeNodes[i] + 1 )
1342 try:
1343 main.log.warn(
1344 "ONOS" + node + " roles: ",
1345 json.dumps(
1346 json.loads( ONOSMastership[ i ] ),
1347 sort_keys=True,
1348 indent=4,
1349 separators=( ',', ': ' ) ) )
1350 except ( ValueError, TypeError ):
1351 main.log.warn( repr( ONOSMastership[ i ] ) )
1352 elif rolesResults and consistentMastership:
1353 mastershipState = ONOSMastership[ 0 ]
1354
1355 main.step( "Get the intents from each controller" )
1356 global intentState
1357 intentState = []
1358 ONOSIntents = []
1359 consistentIntents = True # Are Intents consistent across nodes?
1360 intentsResults = True # Could we read Intents from ONOS?
1361 threads = []
1362 for i in main.activeNodes:
1363 t = main.Thread( target=main.CLIs[i].intents,
1364 name="intents-" + str( i ),
1365 args=[],
1366 kwargs={ 'jsonFormat': True } )
1367 threads.append( t )
1368 t.start()
1369
1370 for t in threads:
1371 t.join()
1372 ONOSIntents.append( t.result )
1373
1374 for i in range( len( ONOSIntents ) ):
1375 node = str( main.activeNodes[i] + 1 )
1376 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1377 main.log.error( "Error in getting ONOS" + node + " intents" )
1378 main.log.warn( "ONOS" + node + " intents response: " +
1379 repr( ONOSIntents[ i ] ) )
1380 intentsResults = False
1381 utilities.assert_equals(
1382 expect=True,
1383 actual=intentsResults,
1384 onpass="No error in reading intents output",
1385 onfail="Error in reading intents from ONOS" )
1386
1387 main.step( "Check for consistency in Intents from each controller" )
1388 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1389 main.log.info( "Intents are consistent across all ONOS " +
1390 "nodes" )
1391 else:
1392 consistentIntents = False
1393 main.log.error( "Intents not consistent" )
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=consistentIntents,
1397 onpass="Intents are consistent across all ONOS nodes",
1398 onfail="ONOS nodes have different views of intents" )
1399
1400 if intentsResults:
1401 # Try to make it easy to figure out what is happening
1402 #
1403 # Intent ONOS1 ONOS2 ...
1404 # 0x01 INSTALLED INSTALLING
1405 # ... ... ...
1406 # ... ... ...
1407 title = " Id"
1408 for n in main.activeNodes:
1409 title += " " * 10 + "ONOS" + str( n + 1 )
1410 main.log.warn( title )
1411 # get all intent keys in the cluster
1412 keys = []
1413 try:
1414 # Get the set of all intent keys
1415 for nodeStr in ONOSIntents:
1416 node = json.loads( nodeStr )
1417 for intent in node:
1418 keys.append( intent.get( 'id' ) )
1419 keys = set( keys )
1420 # For each intent key, print the state on each node
1421 for key in keys:
1422 row = "%-13s" % key
1423 for nodeStr in ONOSIntents:
1424 node = json.loads( nodeStr )
1425 for intent in node:
1426 if intent.get( 'id', "Error" ) == key:
1427 row += "%-15s" % intent.get( 'state' )
1428 main.log.warn( row )
1429 # End of intent state table
1430 except ValueError as e:
1431 main.log.exception( e )
1432 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1433
1434 if intentsResults and not consistentIntents:
1435 # print the json objects
1436 n = str( main.activeNodes[-1] + 1 )
1437 main.log.debug( "ONOS" + n + " intents: " )
1438 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1439 sort_keys=True,
1440 indent=4,
1441 separators=( ',', ': ' ) ) )
1442 for i in range( len( ONOSIntents ) ):
1443 node = str( main.activeNodes[i] + 1 )
1444 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1445 main.log.debug( "ONOS" + node + " intents: " )
1446 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1447 sort_keys=True,
1448 indent=4,
1449 separators=( ',', ': ' ) ) )
1450 else:
1451 main.log.debug( "ONOS" + node + " intents match ONOS" +
1452 n + " intents" )
1453 elif intentsResults and consistentIntents:
1454 intentState = ONOSIntents[ 0 ]
1455
1456 main.step( "Get the flows from each controller" )
1457 global flowState
1458 flowState = []
1459 ONOSFlows = []
1460 ONOSFlowsJson = []
1461 flowCheck = main.FALSE
1462 consistentFlows = True
1463 flowsResults = True
1464 threads = []
1465 for i in main.activeNodes:
1466 t = main.Thread( target=main.CLIs[i].flows,
1467 name="flows-" + str( i ),
1468 args=[],
1469 kwargs={ 'jsonFormat': True } )
1470 threads.append( t )
1471 t.start()
1472
1473 # NOTE: Flows command can take some time to run
1474 time.sleep(30)
1475 for t in threads:
1476 t.join()
1477 result = t.result
1478 ONOSFlows.append( result )
1479
1480 for i in range( len( ONOSFlows ) ):
1481 num = str( main.activeNodes[i] + 1 )
1482 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1483 main.log.error( "Error in getting ONOS" + num + " flows" )
1484 main.log.warn( "ONOS" + num + " flows response: " +
1485 repr( ONOSFlows[ i ] ) )
1486 flowsResults = False
1487 ONOSFlowsJson.append( None )
1488 else:
1489 try:
1490 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1491 except ( ValueError, TypeError ):
1492 # FIXME: change this to log.error?
1493 main.log.exception( "Error in parsing ONOS" + num +
1494 " response as json." )
1495 main.log.error( repr( ONOSFlows[ i ] ) )
1496 ONOSFlowsJson.append( None )
1497 flowsResults = False
1498 utilities.assert_equals(
1499 expect=True,
1500 actual=flowsResults,
1501 onpass="No error in reading flows output",
1502 onfail="Error in reading flows from ONOS" )
1503
1504 main.step( "Check for consistency in Flows from each controller" )
1505 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1506 if all( tmp ):
1507 main.log.info( "Flow count is consistent across all ONOS nodes" )
1508 else:
1509 consistentFlows = False
1510 utilities.assert_equals(
1511 expect=True,
1512 actual=consistentFlows,
1513 onpass="The flow count is consistent across all ONOS nodes",
1514 onfail="ONOS nodes have different flow counts" )
1515
1516 if flowsResults and not consistentFlows:
1517 for i in range( len( ONOSFlows ) ):
1518 node = str( main.activeNodes[i] + 1 )
1519 try:
1520 main.log.warn(
1521 "ONOS" + node + " flows: " +
1522 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1523 indent=4, separators=( ',', ': ' ) ) )
1524 except ( ValueError, TypeError ):
1525 main.log.warn( "ONOS" + node + " flows: " +
1526 repr( ONOSFlows[ i ] ) )
1527 elif flowsResults and consistentFlows:
1528 flowCheck = main.TRUE
1529 flowState = ONOSFlows[ 0 ]
1530
1531 main.step( "Get the OF Table entries" )
1532 global flows
1533 flows = []
1534 for i in range( 1, 29 ):
1535 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1536 if flowCheck == main.FALSE:
1537 for table in flows:
1538 main.log.warn( table )
1539 # TODO: Compare switch flow tables with ONOS flow tables
1540
1541 main.step( "Start continuous pings" )
1542 main.Mininet2.pingLong(
1543 src=main.params[ 'PING' ][ 'source1' ],
1544 target=main.params[ 'PING' ][ 'target1' ],
1545 pingTime=500 )
1546 main.Mininet2.pingLong(
1547 src=main.params[ 'PING' ][ 'source2' ],
1548 target=main.params[ 'PING' ][ 'target2' ],
1549 pingTime=500 )
1550 main.Mininet2.pingLong(
1551 src=main.params[ 'PING' ][ 'source3' ],
1552 target=main.params[ 'PING' ][ 'target3' ],
1553 pingTime=500 )
1554 main.Mininet2.pingLong(
1555 src=main.params[ 'PING' ][ 'source4' ],
1556 target=main.params[ 'PING' ][ 'target4' ],
1557 pingTime=500 )
1558 main.Mininet2.pingLong(
1559 src=main.params[ 'PING' ][ 'source5' ],
1560 target=main.params[ 'PING' ][ 'target5' ],
1561 pingTime=500 )
1562 main.Mininet2.pingLong(
1563 src=main.params[ 'PING' ][ 'source6' ],
1564 target=main.params[ 'PING' ][ 'target6' ],
1565 pingTime=500 )
1566 main.Mininet2.pingLong(
1567 src=main.params[ 'PING' ][ 'source7' ],
1568 target=main.params[ 'PING' ][ 'target7' ],
1569 pingTime=500 )
1570 main.Mininet2.pingLong(
1571 src=main.params[ 'PING' ][ 'source8' ],
1572 target=main.params[ 'PING' ][ 'target8' ],
1573 pingTime=500 )
1574 main.Mininet2.pingLong(
1575 src=main.params[ 'PING' ][ 'source9' ],
1576 target=main.params[ 'PING' ][ 'target9' ],
1577 pingTime=500 )
1578 main.Mininet2.pingLong(
1579 src=main.params[ 'PING' ][ 'source10' ],
1580 target=main.params[ 'PING' ][ 'target10' ],
1581 pingTime=500 )
1582
1583 main.step( "Collecting topology information from ONOS" )
1584 devices = []
1585 threads = []
1586 for i in main.activeNodes:
1587 t = main.Thread( target=main.CLIs[i].devices,
1588 name="devices-" + str( i ),
1589 args=[ ] )
1590 threads.append( t )
1591 t.start()
1592
1593 for t in threads:
1594 t.join()
1595 devices.append( t.result )
1596 hosts = []
1597 threads = []
1598 for i in main.activeNodes:
1599 t = main.Thread( target=main.CLIs[i].hosts,
1600 name="hosts-" + str( i ),
1601 args=[ ] )
1602 threads.append( t )
1603 t.start()
1604
1605 for t in threads:
1606 t.join()
1607 try:
1608 hosts.append( json.loads( t.result ) )
1609 except ( ValueError, TypeError ):
1610 # FIXME: better handling of this, print which node
1611 # Maybe use thread name?
1612 main.log.exception( "Error parsing json output of hosts" )
1613 main.log.warn( repr( t.result ) )
1614 hosts.append( None )
1615
1616 ports = []
1617 threads = []
1618 for i in main.activeNodes:
1619 t = main.Thread( target=main.CLIs[i].ports,
1620 name="ports-" + str( i ),
1621 args=[ ] )
1622 threads.append( t )
1623 t.start()
1624
1625 for t in threads:
1626 t.join()
1627 ports.append( t.result )
1628 links = []
1629 threads = []
1630 for i in main.activeNodes:
1631 t = main.Thread( target=main.CLIs[i].links,
1632 name="links-" + str( i ),
1633 args=[ ] )
1634 threads.append( t )
1635 t.start()
1636
1637 for t in threads:
1638 t.join()
1639 links.append( t.result )
1640 clusters = []
1641 threads = []
1642 for i in main.activeNodes:
1643 t = main.Thread( target=main.CLIs[i].clusters,
1644 name="clusters-" + str( i ),
1645 args=[ ] )
1646 threads.append( t )
1647 t.start()
1648
1649 for t in threads:
1650 t.join()
1651 clusters.append( t.result )
1652 # Compare json objects for hosts and dataplane clusters
1653
1654 # hosts
1655 main.step( "Host view is consistent across ONOS nodes" )
1656 consistentHostsResult = main.TRUE
1657 for controller in range( len( hosts ) ):
1658 controllerStr = str( main.activeNodes[controller] + 1 )
1659 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1660 if hosts[ controller ] == hosts[ 0 ]:
1661 continue
1662 else: # hosts not consistent
1663 main.log.error( "hosts from ONOS" +
1664 controllerStr +
1665 " is inconsistent with ONOS1" )
1666 main.log.warn( repr( hosts[ controller ] ) )
1667 consistentHostsResult = main.FALSE
1668
1669 else:
1670 main.log.error( "Error in getting ONOS hosts from ONOS" +
1671 controllerStr )
1672 consistentHostsResult = main.FALSE
1673 main.log.warn( "ONOS" + controllerStr +
1674 " hosts response: " +
1675 repr( hosts[ controller ] ) )
1676 utilities.assert_equals(
1677 expect=main.TRUE,
1678 actual=consistentHostsResult,
1679 onpass="Hosts view is consistent across all ONOS nodes",
1680 onfail="ONOS nodes have different views of hosts" )
1681
1682 main.step( "Each host has an IP address" )
1683 ipResult = main.TRUE
1684 for controller in range( 0, len( hosts ) ):
1685 controllerStr = str( main.activeNodes[controller] + 1 )
1686 if hosts[ controller ]:
1687 for host in hosts[ controller ]:
1688 if not host.get( 'ipAddresses', [ ] ):
1689 main.log.error( "Error with host ips on controller" +
1690 controllerStr + ": " + str( host ) )
1691 ipResult = main.FALSE
1692 utilities.assert_equals(
1693 expect=main.TRUE,
1694 actual=ipResult,
1695 onpass="The ips of the hosts aren't empty",
1696 onfail="The ip of at least one host is missing" )
1697
1698 # Strongly connected clusters of devices
1699 main.step( "Cluster view is consistent across ONOS nodes" )
1700 consistentClustersResult = main.TRUE
1701 for controller in range( len( clusters ) ):
1702 controllerStr = str( main.activeNodes[controller] + 1 )
1703 if "Error" not in clusters[ controller ]:
1704 if clusters[ controller ] == clusters[ 0 ]:
1705 continue
1706 else: # clusters not consistent
1707 main.log.error( "clusters from ONOS" + controllerStr +
1708 " is inconsistent with ONOS1" )
1709 consistentClustersResult = main.FALSE
1710
1711 else:
1712 main.log.error( "Error in getting dataplane clusters " +
1713 "from ONOS" + controllerStr )
1714 consistentClustersResult = main.FALSE
1715 main.log.warn( "ONOS" + controllerStr +
1716 " clusters response: " +
1717 repr( clusters[ controller ] ) )
1718 utilities.assert_equals(
1719 expect=main.TRUE,
1720 actual=consistentClustersResult,
1721 onpass="Clusters view is consistent across all ONOS nodes",
1722 onfail="ONOS nodes have different views of clusters" )
1723 if consistentClustersResult != main.TRUE:
1724 main.log.debug( clusters )
1725 # there should always only be one cluster
1726 main.step( "Cluster view correct across ONOS nodes" )
1727 try:
1728 numClusters = len( json.loads( clusters[ 0 ] ) )
1729 except ( ValueError, TypeError ):
1730 main.log.exception( "Error parsing clusters[0]: " +
1731 repr( clusters[ 0 ] ) )
1732 numClusters = "ERROR"
1733 utilities.assert_equals(
1734 expect=1,
1735 actual=numClusters,
1736 onpass="ONOS shows 1 SCC",
1737 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1738
1739 main.step( "Comparing ONOS topology to MN" )
1740 devicesResults = main.TRUE
1741 linksResults = main.TRUE
1742 hostsResults = main.TRUE
1743 mnSwitches = main.Mininet1.getSwitches()
1744 mnLinks = main.Mininet1.getLinks()
1745 mnHosts = main.Mininet1.getHosts()
1746 for controller in main.activeNodes:
1747 controllerStr = str( main.activeNodes[controller] + 1 )
1748 if devices[ controller ] and ports[ controller ] and\
1749 "Error" not in devices[ controller ] and\
1750 "Error" not in ports[ controller ]:
1751 currentDevicesResult = main.Mininet1.compareSwitches(
1752 mnSwitches,
1753 json.loads( devices[ controller ] ),
1754 json.loads( ports[ controller ] ) )
1755 else:
1756 currentDevicesResult = main.FALSE
1757 utilities.assert_equals( expect=main.TRUE,
1758 actual=currentDevicesResult,
1759 onpass="ONOS" + controllerStr +
1760 " Switches view is correct",
1761 onfail="ONOS" + controllerStr +
1762 " Switches view is incorrect" )
1763 if links[ controller ] and "Error" not in links[ controller ]:
1764 currentLinksResult = main.Mininet1.compareLinks(
1765 mnSwitches, mnLinks,
1766 json.loads( links[ controller ] ) )
1767 else:
1768 currentLinksResult = main.FALSE
1769 utilities.assert_equals( expect=main.TRUE,
1770 actual=currentLinksResult,
1771 onpass="ONOS" + controllerStr +
1772 " links view is correct",
1773 onfail="ONOS" + controllerStr +
1774 " links view is incorrect" )
1775
1776 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1777 currentHostsResult = main.Mininet1.compareHosts(
1778 mnHosts,
1779 hosts[ controller ] )
1780 else:
1781 currentHostsResult = main.FALSE
1782 utilities.assert_equals( expect=main.TRUE,
1783 actual=currentHostsResult,
1784 onpass="ONOS" + controllerStr +
1785 " hosts exist in Mininet",
1786 onfail="ONOS" + controllerStr +
1787 " hosts don't match Mininet" )
1788
1789 devicesResults = devicesResults and currentDevicesResult
1790 linksResults = linksResults and currentLinksResult
1791 hostsResults = hostsResults and currentHostsResult
1792
1793 main.step( "Device information is correct" )
1794 utilities.assert_equals(
1795 expect=main.TRUE,
1796 actual=devicesResults,
1797 onpass="Device information is correct",
1798 onfail="Device information is incorrect" )
1799
1800 main.step( "Links are correct" )
1801 utilities.assert_equals(
1802 expect=main.TRUE,
1803 actual=linksResults,
1804 onpass="Link are correct",
1805 onfail="Links are incorrect" )
1806
1807 main.step( "Hosts are correct" )
1808 utilities.assert_equals(
1809 expect=main.TRUE,
1810 actual=hostsResults,
1811 onpass="Hosts are correct",
1812 onfail="Hosts are incorrect" )
1813
1814 def CASE6( self, main ):
1815 """
1816 The Scaling case.
1817 """
1818 import time
1819 import re
1820 assert main.numCtrls, "main.numCtrls not defined"
1821 assert main, "main not defined"
1822 assert utilities.assert_equals, "utilities.assert_equals not defined"
1823 assert main.CLIs, "main.CLIs not defined"
1824 assert main.nodes, "main.nodes not defined"
1825 try:
1826 labels
1827 except NameError:
1828 main.log.error( "labels not defined, setting to []" )
1829 global labels
1830 labels = []
1831 try:
1832 data
1833 except NameError:
1834 main.log.error( "data not defined, setting to []" )
1835 global data
1836 data = []
1837
1838 main.case( "Restart entire ONOS cluster" )
1839
1840 main.step( "Checking ONOS Logs for errors" )
1841 for i in main.activeNodes:
1842 node = main.nodes[i]
1843 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1844 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1845
1846 """
1847 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1848 modify cluster.json file appropriately
1849 install/deactivate node as needed
1850 """
1851
1852 try:
1853 prevNodes = main.activeNodes
1854 scale = main.scaling.pop(0)
1855 if "e" in scale:
1856 equal = True
1857 else:
1858 equal = False
1859 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1860 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1861 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1862 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1863 onpass="New cluster metadata file generated",
1864 onfail="Failled to generate new metadata file" )
1865 time.sleep( 5 ) # Give time for nodes to read new file
1866 except IndexError:
1867 main.cleanup()
1868 main.exit()
1869
1870 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1871 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1872
1873 main.step( "Start new nodes" ) # OR stop old nodes?
1874 started = main.TRUE
1875 for i in newNodes:
1876 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1877 utilities.assert_equals( expect=main.TRUE, actual=started,
1878 onpass="ONOS started",
1879 onfail="ONOS start NOT successful" )
1880
1881 main.step( "Checking if ONOS is up yet" )
1882 for i in range( 2 ):
1883 onosIsupResult = main.TRUE
1884 for i in main.activeNodes:
1885 node = main.nodes[i]
1886 started = main.ONOSbench.isup( node.ip_address )
1887 if not started:
1888 main.log.error( node.name + " didn't start!" )
1889 onosIsupResult = onosIsupResult and started
1890 if onosIsupResult == main.TRUE:
1891 break
1892 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1893 onpass="ONOS started",
1894 onfail="ONOS start NOT successful" )
1895
1896 main.log.step( "Starting ONOS CLI sessions" )
1897 cliResults = main.TRUE
1898 threads = []
1899 for i in main.activeNodes:
1900 t = main.Thread( target=main.CLIs[i].startOnosCli,
1901 name="startOnosCli-" + str( i ),
1902 args=[main.nodes[i].ip_address] )
1903 threads.append( t )
1904 t.start()
1905
1906 for t in threads:
1907 t.join()
1908 cliResults = cliResults and t.result
1909 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1910 onpass="ONOS cli started",
1911 onfail="ONOS clis did not start" )
1912
1913 main.step( "Checking ONOS nodes" )
1914 nodeResults = utilities.retry( main.HA.nodesCheck,
1915 False,
1916 args=[main.activeNodes],
1917 attempts=5 )
1918 utilities.assert_equals( expect=True, actual=nodeResults,
1919 onpass="Nodes check successful",
1920 onfail="Nodes check NOT successful" )
1921
1922 for i in range( 10 ):
1923 ready = True
1924 for i in main.activeNodes:
1925 cli = main.CLIs[i]
1926 output = cli.summary()
1927 if not output:
1928 ready = False
1929 if ready:
1930 break
1931 time.sleep( 30 )
1932 utilities.assert_equals( expect=True, actual=ready,
1933 onpass="ONOS summary command succeded",
1934 onfail="ONOS summary command failed" )
1935 if not ready:
1936 main.cleanup()
1937 main.exit()
1938
1939 # Rerun for election on new nodes
1940 runResults = main.TRUE
1941 for i in main.activeNodes:
1942 cli = main.CLIs[i]
1943 run = cli.electionTestRun()
1944 if run != main.TRUE:
1945 main.log.error( "Error running for election on " + cli.name )
1946 runResults = runResults and run
1947 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1948 onpass="Reran for election",
1949 onfail="Failed to rerun for election" )
1950
1951 # TODO: Make this configurable
1952 time.sleep( 60 )
1953 for node in main.activeNodes:
1954 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1955 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1956 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1959
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that cluster state survived the scale event:
        * every switch still has a master and all nodes agree on mastership
        * intents are readable, consistent across nodes, and unchanged from
          the snapshot taken before scaling
        * switch OpenFlow tables are unchanged from the pre-scaling snapshot
        * the leadership-election app still reports a single, agreed leader

        NOTE(review): depends on globals saved by earlier cases:
        ``intentState`` (intents snapshot; the inline NOTE below says case 5
        sets it) and ``flows`` (per-switch flow-table snapshot — presumably
        saved by an earlier case; confirm).
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel; AND the per-node results.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the raw `roles` output from each active node.
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            # ONOS node numbers are 1-based in log messages.
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Compare each node's raw roles string against the first node's.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's full roles view to help debug the mismatch.
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        # Collect the JSON intents output from each active node.
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE: entries are raw JSON strings, so sorted() compares the
        #       characters of each string; equal sorted character lists are
        #       taken to mean equal views.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states, e.g. {INSTALLED: 10}.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # EAFP check for the cross-case global: NameError means the
        # snapshot was never taken, so this comparison is skipped.
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length: fall back to a structural comparison that
                # ignores intent ordering within the JSON list.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before scaling",
                onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # NOTE(review): `flows` is a global per-switch snapshot indexed
        #               0..27 — presumably saved before scaling; confirm.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        # NOTE: the triple-quoted block below is disabled ping-loss
        #       checking kept for reference; it is never executed.
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node must report the same, non-error, non-None leader.
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2268
    def CASE8( self, main ):
        """
        Compare topo.

        Repeatedly pulls devices, hosts, ports, links and clusters from every
        active ONOS node and compares them against the Mininet topology,
        retrying until they match (or the retry budget runs out). Afterwards
        checks cross-node consistency of hosts and clusters, verifies there
        is exactly one strongly-connected component, and runs a node-state
        check.

        NOTE: `hosts`, `clusters`, `devicesResults`, etc. used after the
        while loop hold the values from the LAST loop iteration.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        # Retry while the topology doesn't match AND we have not yet
        # exceeded both 60 seconds of elapsed time and 3 attempts.
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Pull devices from every active node in parallel, with retries.
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Pull hosts (parsed as JSON; None on parse failure).
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address.
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Pull ports.
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Pull links.
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Pull strongly-connected-component clusters.
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to
            # compare; retry the whole pull.
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    # NOTE(review): if this raises, currentDevicesResult may
                    #               be unbound on the first iteration ->
                    #               NameError at the assert below; confirm.
                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                            mnHosts,
                            hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        # Check each host's attachment point (device + port)
                        # against the expected mapping above.
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        # NOTE(review): if parsing succeeds but numClusters != 1,
        #               clusterResults is never assigned -> NameError in the
        #               topoResult expression below; confirm and fix.
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within two pull attempts.
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump the non-ACTIVE karaf components to help debug.
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2685
2686 def CASE9( self, main ):
2687 """
2688 Link s3-s28 down
2689 """
2690 import time
2691 assert main.numCtrls, "main.numCtrls not defined"
2692 assert main, "main not defined"
2693 assert utilities.assert_equals, "utilities.assert_equals not defined"
2694 assert main.CLIs, "main.CLIs not defined"
2695 assert main.nodes, "main.nodes not defined"
2696 # NOTE: You should probably run a topology check after this
2697
2698 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2699
2700 description = "Turn off a link to ensure that Link Discovery " +\
2701 "is working properly"
2702 main.case( description )
2703
2704 main.step( "Kill Link between s3 and s28" )
2705 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2706 main.log.info( "Waiting " + str( linkSleep ) +
2707 " seconds for link down to be discovered" )
2708 time.sleep( linkSleep )
2709 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2710 onpass="Link down successful",
2711 onfail="Failed to bring link down" )
2712 # TODO do some sort of check here
2713
2714 def CASE10( self, main ):
2715 """
2716 Link s3-s28 up
2717 """
2718 import time
2719 assert main.numCtrls, "main.numCtrls not defined"
2720 assert main, "main not defined"
2721 assert utilities.assert_equals, "utilities.assert_equals not defined"
2722 assert main.CLIs, "main.CLIs not defined"
2723 assert main.nodes, "main.nodes not defined"
2724 # NOTE: You should probably run a topology check after this
2725
2726 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2727
2728 description = "Restore a link to ensure that Link Discovery is " + \
2729 "working properly"
2730 main.case( description )
2731
2732 main.step( "Bring link between s3 and s28 back up" )
2733 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2734 main.log.info( "Waiting " + str( linkSleep ) +
2735 " seconds for link up to be discovered" )
2736 time.sleep( linkSleep )
2737 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2738 onpass="Link up successful",
2739 onfail="Failed to bring link up" )
2740 # TODO do some sort of check here
2741
2742 def CASE11( self, main ):
2743 """
2744 Switch Down
2745 """
2746 # NOTE: You should probably run a topology check after this
2747 import time
2748 assert main.numCtrls, "main.numCtrls not defined"
2749 assert main, "main not defined"
2750 assert utilities.assert_equals, "utilities.assert_equals not defined"
2751 assert main.CLIs, "main.CLIs not defined"
2752 assert main.nodes, "main.nodes not defined"
2753
2754 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2755
2756 description = "Killing a switch to ensure it is discovered correctly"
2757 onosCli = main.CLIs[ main.activeNodes[0] ]
2758 main.case( description )
2759 switch = main.params[ 'kill' ][ 'switch' ]
2760 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2761
2762 # TODO: Make this switch parameterizable
2763 main.step( "Kill " + switch )
2764 main.log.info( "Deleting " + switch )
2765 main.Mininet1.delSwitch( switch )
2766 main.log.info( "Waiting " + str( switchSleep ) +
2767 " seconds for switch down to be discovered" )
2768 time.sleep( switchSleep )
2769 device = onosCli.getDevice( dpid=switchDPID )
2770 # Peek at the deleted switch
2771 main.log.warn( str( device ) )
2772 result = main.FALSE
2773 if device and device[ 'available' ] is False:
2774 result = main.TRUE
2775 utilities.assert_equals( expect=main.TRUE, actual=result,
2776 onpass="Kill switch successful",
2777 onfail="Failed to kill switch?" )
2778
2779 def CASE12( self, main ):
2780 """
2781 Switch Up
2782 """
2783 # NOTE: You should probably run a topology check after this
2784 import time
2785 assert main.numCtrls, "main.numCtrls not defined"
2786 assert main, "main not defined"
2787 assert utilities.assert_equals, "utilities.assert_equals not defined"
2788 assert main.CLIs, "main.CLIs not defined"
2789 assert main.nodes, "main.nodes not defined"
2790
2791 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2792 switch = main.params[ 'kill' ][ 'switch' ]
2793 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2794 links = main.params[ 'kill' ][ 'links' ].split()
2795 onosCli = main.CLIs[ main.activeNodes[0] ]
2796 description = "Adding a switch to ensure it is discovered correctly"
2797 main.case( description )
2798
2799 main.step( "Add back " + switch )
2800 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2801 for peer in links:
2802 main.Mininet1.addLink( switch, peer )
2803 ipList = [ node.ip_address for node in main.nodes ]
2804 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2805 main.log.info( "Waiting " + str( switchSleep ) +
2806 " seconds for switch up to be discovered" )
2807 time.sleep( switchSleep )
2808 device = onosCli.getDevice( dpid=switchDPID )
2809 # Peek at the deleted switch
2810 main.log.warn( str( device ) )
2811 result = main.FALSE
2812 if device and device[ 'available' ]:
2813 result = main.TRUE
2814 utilities.assert_equals( expect=main.TRUE, actual=result,
2815 onpass="add switch successful",
2816 onfail="Failed to add switch?" )
2817
    def CASE13( self, main ):
        """
        Clean up the test environment: stop tcpdump, optionally copy ONOS
        logs and the Mininet pcap to the test station, stop Mininet, scan
        ONOS logs for errors, dump collected timer data, and stop the
        local webserver.
        """
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Test Cleanup" )
        main.step( "Killing tcpdumps" )
        main.Mininet2.stopTcpdump()

        # Log collection is opt-in via the BACKUP params section
        if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
            main.step( "Copying MN pcap and ONOS log files to test station" )
            # NOTE: MN Pcap file is being saved to logdir.
            # We scp this file as MN and TestON aren't necessarily the same vm

            # FIXME: To be replaced with a Jenkin's post script
            # TODO: Load these from params
            # NOTE: must end in /
            logFolder = "/opt/onos/log/"
            logFiles = [ "karaf.log", "karaf.log.1" ]
            # NOTE: must end in /
            for f in logFiles:
                for node in main.nodes:
                    # Prefix the destination file with the node name so
                    # copies from different nodes don't collide in logdir
                    dstName = main.logdir + "/" + node.name + "-" + f
                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                               logFolder + f, dstName )
            # std*.log's
            # NOTE: must end in /
            logFolder = "/opt/onos/var/"
            logFiles = [ "stderr.log", "stdout.log" ]
            # NOTE: must end in /
            for f in logFiles:
                for node in main.nodes:
                    dstName = main.logdir + "/" + node.name + "-" + f
                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                               logFolder + f, dstName )
        else:
            main.log.debug( "skipping saving log files" )

        main.step( "Stopping Mininet" )
        mnResult = main.Mininet1.stopNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet stopped",
                                 onfail="MN cleanup NOT successful" )

        main.step( "Checking ONOS Logs for errors" )
        for node in main.nodes:
            main.log.debug( "Checking logs for errors on " + node.name + ":" )
            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )

        try:
            # labels/data are presumably populated by an earlier case's
            # timer collection — TODO(review): confirm which case defines
            # them.  If they were never set, the NameError below is caught
            # and logged instead of failing the cleanup.
            timerLog = open( main.logdir + "/Timers.csv", 'w')
            main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
            timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
            timerLog.close()
        except NameError, e:
            main.log.exception(e)

        main.step( "Stopping webserver" )
        status = main.Server.stop( )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Stop Server",
                                 onfail="Failled to stop SimpleHTTPServer" )
        # Drop the handle so later cases can't use a stopped server
        del main.Server
2886
2887 def CASE14( self, main ):
2888 """
2889 start election app on all onos nodes
2890 """
2891 import time
2892 assert main.numCtrls, "main.numCtrls not defined"
2893 assert main, "main not defined"
2894 assert utilities.assert_equals, "utilities.assert_equals not defined"
2895 assert main.CLIs, "main.CLIs not defined"
2896 assert main.nodes, "main.nodes not defined"
2897
2898 main.case("Start Leadership Election app")
2899 main.step( "Install leadership election app" )
2900 onosCli = main.CLIs[ main.activeNodes[0] ]
2901 appResult = onosCli.activateApp( "org.onosproject.election" )
2902 utilities.assert_equals(
2903 expect=main.TRUE,
2904 actual=appResult,
2905 onpass="Election app installed",
2906 onfail="Something went wrong with installing Leadership election" )
2907
2908 main.step( "Run for election on each node" )
2909 for i in main.activeNodes:
2910 main.CLIs[i].electionTestRun()
2911 time.sleep(5)
2912 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2913 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2914 utilities.assert_equals(
2915 expect=True,
2916 actual=sameResult,
2917 onpass="All nodes see the same leaderboards",
2918 onfail="Inconsistent leaderboards" )
2919
2920 if sameResult:
2921 leader = leaders[ 0 ][ 0 ]
2922 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2923 correctLeader = True
2924 else:
2925 correctLeader = False
2926 main.step( "First node was elected leader" )
2927 utilities.assert_equals(
2928 expect=True,
2929 actual=correctLeader,
2930 onpass="Correct leader was elected",
2931 onfail="Incorrect leader" )
2932
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawl and later before withdrawl vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a canidate is not persistant

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leaders fron newLoeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        # With a single controller, withdrawing the leader leaves nobody
        # to take over, so a 'none' leader is the expected outcome below
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree, so board 0's first entry is THE leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported no leader at all; only valid
            # when we expect no leader (single-controller cluster)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawl
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries on the board: no second candidate to
            # compare against, so the check cannot be decided
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Paremterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3106
3107 def CASE16( self, main ):
3108 """
3109 Install Distributed Primitives app
3110 """
3111 import time
3112 assert main.numCtrls, "main.numCtrls not defined"
3113 assert main, "main not defined"
3114 assert utilities.assert_equals, "utilities.assert_equals not defined"
3115 assert main.CLIs, "main.CLIs not defined"
3116 assert main.nodes, "main.nodes not defined"
3117
3118 # Variables for the distributed primitives tests
3119 global pCounterName
3120 global pCounterValue
3121 global onosSet
3122 global onosSetName
3123 pCounterName = "TestON-Partitions"
3124 pCounterValue = 0
3125 onosSet = set([])
3126 onosSetName = "TestON-set"
3127
3128 description = "Install Primitives app"
3129 main.case( description )
3130 main.step( "Install Primitives app" )
3131 appName = "org.onosproject.distributedprimitives"
3132 node = main.activeNodes[0]
3133 appResults = main.CLIs[node].activateApp( appName )
3134 utilities.assert_equals( expect=main.TRUE,
3135 actual=appResults,
3136 onpass="Primitives app activated",
3137 onfail="Primitives app not activated" )
3138 time.sleep( 5 ) # To allow all nodes to activate
3139
3140 def CASE17( self, main ):
3141 """
3142 Check for basic functionality with distributed primitives
3143 """
3144 # Make sure variables are defined/set
3145 assert main.numCtrls, "main.numCtrls not defined"
3146 assert main, "main not defined"
3147 assert utilities.assert_equals, "utilities.assert_equals not defined"
3148 assert main.CLIs, "main.CLIs not defined"
3149 assert main.nodes, "main.nodes not defined"
3150 assert pCounterName, "pCounterName not defined"
3151 assert onosSetName, "onosSetName not defined"
3152 # NOTE: assert fails if value is 0/None/Empty/False
3153 try:
3154 pCounterValue
3155 except NameError:
3156 main.log.error( "pCounterValue not defined, setting to 0" )
3157 pCounterValue = 0
3158 try:
3159 onosSet
3160 except NameError:
3161 main.log.error( "onosSet not defined, setting to empty Set" )
3162 onosSet = set([])
3163 # Variables for the distributed primitives tests. These are local only
3164 addValue = "a"
3165 addAllValue = "a b c d e f"
3166 retainValue = "c d e f"
3167
3168 description = "Check for basic functionality with distributed " +\
3169 "primitives"
3170 main.case( description )
3171 main.caseExplanation = "Test the methods of the distributed " +\
3172 "primitives (counters and sets) throught the cli"
3173 # DISTRIBUTED ATOMIC COUNTERS
3174 # Partitioned counters
3175 main.step( "Increment then get a default counter on each node" )
3176 pCounters = []
3177 threads = []
3178 addedPValues = []
3179 for i in main.activeNodes:
3180 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3181 name="counterAddAndGet-" + str( i ),
3182 args=[ pCounterName ] )
3183 pCounterValue += 1
3184 addedPValues.append( pCounterValue )
3185 threads.append( t )
3186 t.start()
3187
3188 for t in threads:
3189 t.join()
3190 pCounters.append( t.result )
3191 # Check that counter incremented numController times
3192 pCounterResults = True
3193 for i in addedPValues:
3194 tmpResult = i in pCounters
3195 pCounterResults = pCounterResults and tmpResult
3196 if not tmpResult:
3197 main.log.error( str( i ) + " is not in partitioned "
3198 "counter incremented results" )
3199 utilities.assert_equals( expect=True,
3200 actual=pCounterResults,
3201 onpass="Default counter incremented",
3202 onfail="Error incrementing default" +
3203 " counter" )
3204
3205 main.step( "Get then Increment a default counter on each node" )
3206 pCounters = []
3207 threads = []
3208 addedPValues = []
3209 for i in main.activeNodes:
3210 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3211 name="counterGetAndAdd-" + str( i ),
3212 args=[ pCounterName ] )
3213 addedPValues.append( pCounterValue )
3214 pCounterValue += 1
3215 threads.append( t )
3216 t.start()
3217
3218 for t in threads:
3219 t.join()
3220 pCounters.append( t.result )
3221 # Check that counter incremented numController times
3222 pCounterResults = True
3223 for i in addedPValues:
3224 tmpResult = i in pCounters
3225 pCounterResults = pCounterResults and tmpResult
3226 if not tmpResult:
3227 main.log.error( str( i ) + " is not in partitioned "
3228 "counter incremented results" )
3229 utilities.assert_equals( expect=True,
3230 actual=pCounterResults,
3231 onpass="Default counter incremented",
3232 onfail="Error incrementing default" +
3233 " counter" )
3234
3235 main.step( "Counters we added have the correct values" )
3236 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3237 utilities.assert_equals( expect=main.TRUE,
3238 actual=incrementCheck,
3239 onpass="Added counters are correct",
3240 onfail="Added counters are incorrect" )
3241
3242 main.step( "Add -8 to then get a default counter on each node" )
3243 pCounters = []
3244 threads = []
3245 addedPValues = []
3246 for i in main.activeNodes:
3247 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3248 name="counterIncrement-" + str( i ),
3249 args=[ pCounterName ],
3250 kwargs={ "delta": -8 } )
3251 pCounterValue += -8
3252 addedPValues.append( pCounterValue )
3253 threads.append( t )
3254 t.start()
3255
3256 for t in threads:
3257 t.join()
3258 pCounters.append( t.result )
3259 # Check that counter incremented numController times
3260 pCounterResults = True
3261 for i in addedPValues:
3262 tmpResult = i in pCounters
3263 pCounterResults = pCounterResults and tmpResult
3264 if not tmpResult:
3265 main.log.error( str( i ) + " is not in partitioned "
3266 "counter incremented results" )
3267 utilities.assert_equals( expect=True,
3268 actual=pCounterResults,
3269 onpass="Default counter incremented",
3270 onfail="Error incrementing default" +
3271 " counter" )
3272
3273 main.step( "Add 5 to then get a default counter on each node" )
3274 pCounters = []
3275 threads = []
3276 addedPValues = []
3277 for i in main.activeNodes:
3278 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3279 name="counterIncrement-" + str( i ),
3280 args=[ pCounterName ],
3281 kwargs={ "delta": 5 } )
3282 pCounterValue += 5
3283 addedPValues.append( pCounterValue )
3284 threads.append( t )
3285 t.start()
3286
3287 for t in threads:
3288 t.join()
3289 pCounters.append( t.result )
3290 # Check that counter incremented numController times
3291 pCounterResults = True
3292 for i in addedPValues:
3293 tmpResult = i in pCounters
3294 pCounterResults = pCounterResults and tmpResult
3295 if not tmpResult:
3296 main.log.error( str( i ) + " is not in partitioned "
3297 "counter incremented results" )
3298 utilities.assert_equals( expect=True,
3299 actual=pCounterResults,
3300 onpass="Default counter incremented",
3301 onfail="Error incrementing default" +
3302 " counter" )
3303
3304 main.step( "Get then add 5 to a default counter on each node" )
3305 pCounters = []
3306 threads = []
3307 addedPValues = []
3308 for i in main.activeNodes:
3309 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3310 name="counterIncrement-" + str( i ),
3311 args=[ pCounterName ],
3312 kwargs={ "delta": 5 } )
3313 addedPValues.append( pCounterValue )
3314 pCounterValue += 5
3315 threads.append( t )
3316 t.start()
3317
3318 for t in threads:
3319 t.join()
3320 pCounters.append( t.result )
3321 # Check that counter incremented numController times
3322 pCounterResults = True
3323 for i in addedPValues:
3324 tmpResult = i in pCounters
3325 pCounterResults = pCounterResults and tmpResult
3326 if not tmpResult:
3327 main.log.error( str( i ) + " is not in partitioned "
3328 "counter incremented results" )
3329 utilities.assert_equals( expect=True,
3330 actual=pCounterResults,
3331 onpass="Default counter incremented",
3332 onfail="Error incrementing default" +
3333 " counter" )
3334
3335 main.step( "Counters we added have the correct values" )
3336 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3337 utilities.assert_equals( expect=main.TRUE,
3338 actual=incrementCheck,
3339 onpass="Added counters are correct",
3340 onfail="Added counters are incorrect" )
3341
3342 # DISTRIBUTED SETS
3343 main.step( "Distributed Set get" )
3344 size = len( onosSet )
3345 getResponses = []
3346 threads = []
3347 for i in main.activeNodes:
3348 t = main.Thread( target=main.CLIs[i].setTestGet,
3349 name="setTestGet-" + str( i ),
3350 args=[ onosSetName ] )
3351 threads.append( t )
3352 t.start()
3353 for t in threads:
3354 t.join()
3355 getResponses.append( t.result )
3356
3357 getResults = main.TRUE
3358 for i in range( len( main.activeNodes ) ):
3359 node = str( main.activeNodes[i] + 1 )
3360 if isinstance( getResponses[ i ], list):
3361 current = set( getResponses[ i ] )
3362 if len( current ) == len( getResponses[ i ] ):
3363 # no repeats
3364 if onosSet != current:
3365 main.log.error( "ONOS" + node +
3366 " has incorrect view" +
3367 " of set " + onosSetName + ":\n" +
3368 str( getResponses[ i ] ) )
3369 main.log.debug( "Expected: " + str( onosSet ) )
3370 main.log.debug( "Actual: " + str( current ) )
3371 getResults = main.FALSE
3372 else:
3373 # error, set is not a set
3374 main.log.error( "ONOS" + node +
3375 " has repeat elements in" +
3376 " set " + onosSetName + ":\n" +
3377 str( getResponses[ i ] ) )
3378 getResults = main.FALSE
3379 elif getResponses[ i ] == main.ERROR:
3380 getResults = main.FALSE
3381 utilities.assert_equals( expect=main.TRUE,
3382 actual=getResults,
3383 onpass="Set elements are correct",
3384 onfail="Set elements are incorrect" )
3385
3386 main.step( "Distributed Set size" )
3387 sizeResponses = []
3388 threads = []
3389 for i in main.activeNodes:
3390 t = main.Thread( target=main.CLIs[i].setTestSize,
3391 name="setTestSize-" + str( i ),
3392 args=[ onosSetName ] )
3393 threads.append( t )
3394 t.start()
3395 for t in threads:
3396 t.join()
3397 sizeResponses.append( t.result )
3398
3399 sizeResults = main.TRUE
3400 for i in range( len( main.activeNodes ) ):
3401 node = str( main.activeNodes[i] + 1 )
3402 if size != sizeResponses[ i ]:
3403 sizeResults = main.FALSE
3404 main.log.error( "ONOS" + node +
3405 " expected a size of " + str( size ) +
3406 " for set " + onosSetName +
3407 " but got " + str( sizeResponses[ i ] ) )
3408 utilities.assert_equals( expect=main.TRUE,
3409 actual=sizeResults,
3410 onpass="Set sizes are correct",
3411 onfail="Set sizes are incorrect" )
3412
3413 main.step( "Distributed Set add()" )
3414 onosSet.add( addValue )
3415 addResponses = []
3416 threads = []
3417 for i in main.activeNodes:
3418 t = main.Thread( target=main.CLIs[i].setTestAdd,
3419 name="setTestAdd-" + str( i ),
3420 args=[ onosSetName, addValue ] )
3421 threads.append( t )
3422 t.start()
3423 for t in threads:
3424 t.join()
3425 addResponses.append( t.result )
3426
3427 # main.TRUE = successfully changed the set
3428 # main.FALSE = action resulted in no change in set
3429 # main.ERROR - Some error in executing the function
3430 addResults = main.TRUE
3431 for i in range( len( main.activeNodes ) ):
3432 if addResponses[ i ] == main.TRUE:
3433 # All is well
3434 pass
3435 elif addResponses[ i ] == main.FALSE:
3436 # Already in set, probably fine
3437 pass
3438 elif addResponses[ i ] == main.ERROR:
3439 # Error in execution
3440 addResults = main.FALSE
3441 else:
3442 # unexpected result
3443 addResults = main.FALSE
3444 if addResults != main.TRUE:
3445 main.log.error( "Error executing set add" )
3446
3447 # Check if set is still correct
3448 size = len( onosSet )
3449 getResponses = []
3450 threads = []
3451 for i in main.activeNodes:
3452 t = main.Thread( target=main.CLIs[i].setTestGet,
3453 name="setTestGet-" + str( i ),
3454 args=[ onosSetName ] )
3455 threads.append( t )
3456 t.start()
3457 for t in threads:
3458 t.join()
3459 getResponses.append( t.result )
3460 getResults = main.TRUE
3461 for i in range( len( main.activeNodes ) ):
3462 node = str( main.activeNodes[i] + 1 )
3463 if isinstance( getResponses[ i ], list):
3464 current = set( getResponses[ i ] )
3465 if len( current ) == len( getResponses[ i ] ):
3466 # no repeats
3467 if onosSet != current:
3468 main.log.error( "ONOS" + node + " has incorrect view" +
3469 " of set " + onosSetName + ":\n" +
3470 str( getResponses[ i ] ) )
3471 main.log.debug( "Expected: " + str( onosSet ) )
3472 main.log.debug( "Actual: " + str( current ) )
3473 getResults = main.FALSE
3474 else:
3475 # error, set is not a set
3476 main.log.error( "ONOS" + node + " has repeat elements in" +
3477 " set " + onosSetName + ":\n" +
3478 str( getResponses[ i ] ) )
3479 getResults = main.FALSE
3480 elif getResponses[ i ] == main.ERROR:
3481 getResults = main.FALSE
3482 sizeResponses = []
3483 threads = []
3484 for i in main.activeNodes:
3485 t = main.Thread( target=main.CLIs[i].setTestSize,
3486 name="setTestSize-" + str( i ),
3487 args=[ onosSetName ] )
3488 threads.append( t )
3489 t.start()
3490 for t in threads:
3491 t.join()
3492 sizeResponses.append( t.result )
3493 sizeResults = main.TRUE
3494 for i in range( len( main.activeNodes ) ):
3495 node = str( main.activeNodes[i] + 1 )
3496 if size != sizeResponses[ i ]:
3497 sizeResults = main.FALSE
3498 main.log.error( "ONOS" + node +
3499 " expected a size of " + str( size ) +
3500 " for set " + onosSetName +
3501 " but got " + str( sizeResponses[ i ] ) )
3502 addResults = addResults and getResults and sizeResults
3503 utilities.assert_equals( expect=main.TRUE,
3504 actual=addResults,
3505 onpass="Set add correct",
3506 onfail="Set add was incorrect" )
3507
3508 main.step( "Distributed Set addAll()" )
3509 onosSet.update( addAllValue.split() )
3510 addResponses = []
3511 threads = []
3512 for i in main.activeNodes:
3513 t = main.Thread( target=main.CLIs[i].setTestAdd,
3514 name="setTestAddAll-" + str( i ),
3515 args=[ onosSetName, addAllValue ] )
3516 threads.append( t )
3517 t.start()
3518 for t in threads:
3519 t.join()
3520 addResponses.append( t.result )
3521
3522 # main.TRUE = successfully changed the set
3523 # main.FALSE = action resulted in no change in set
3524 # main.ERROR - Some error in executing the function
3525 addAllResults = main.TRUE
3526 for i in range( len( main.activeNodes ) ):
3527 if addResponses[ i ] == main.TRUE:
3528 # All is well
3529 pass
3530 elif addResponses[ i ] == main.FALSE:
3531 # Already in set, probably fine
3532 pass
3533 elif addResponses[ i ] == main.ERROR:
3534 # Error in execution
3535 addAllResults = main.FALSE
3536 else:
3537 # unexpected result
3538 addAllResults = main.FALSE
3539 if addAllResults != main.TRUE:
3540 main.log.error( "Error executing set addAll" )
3541
3542 # Check if set is still correct
3543 size = len( onosSet )
3544 getResponses = []
3545 threads = []
3546 for i in main.activeNodes:
3547 t = main.Thread( target=main.CLIs[i].setTestGet,
3548 name="setTestGet-" + str( i ),
3549 args=[ onosSetName ] )
3550 threads.append( t )
3551 t.start()
3552 for t in threads:
3553 t.join()
3554 getResponses.append( t.result )
3555 getResults = main.TRUE
3556 for i in range( len( main.activeNodes ) ):
3557 node = str( main.activeNodes[i] + 1 )
3558 if isinstance( getResponses[ i ], list):
3559 current = set( getResponses[ i ] )
3560 if len( current ) == len( getResponses[ i ] ):
3561 # no repeats
3562 if onosSet != current:
3563 main.log.error( "ONOS" + node +
3564 " has incorrect view" +
3565 " of set " + onosSetName + ":\n" +
3566 str( getResponses[ i ] ) )
3567 main.log.debug( "Expected: " + str( onosSet ) )
3568 main.log.debug( "Actual: " + str( current ) )
3569 getResults = main.FALSE
3570 else:
3571 # error, set is not a set
3572 main.log.error( "ONOS" + node +
3573 " has repeat elements in" +
3574 " set " + onosSetName + ":\n" +
3575 str( getResponses[ i ] ) )
3576 getResults = main.FALSE
3577 elif getResponses[ i ] == main.ERROR:
3578 getResults = main.FALSE
3579 sizeResponses = []
3580 threads = []
3581 for i in main.activeNodes:
3582 t = main.Thread( target=main.CLIs[i].setTestSize,
3583 name="setTestSize-" + str( i ),
3584 args=[ onosSetName ] )
3585 threads.append( t )
3586 t.start()
3587 for t in threads:
3588 t.join()
3589 sizeResponses.append( t.result )
3590 sizeResults = main.TRUE
3591 for i in range( len( main.activeNodes ) ):
3592 node = str( main.activeNodes[i] + 1 )
3593 if size != sizeResponses[ i ]:
3594 sizeResults = main.FALSE
3595 main.log.error( "ONOS" + node +
3596 " expected a size of " + str( size ) +
3597 " for set " + onosSetName +
3598 " but got " + str( sizeResponses[ i ] ) )
3599 addAllResults = addAllResults and getResults and sizeResults
3600 utilities.assert_equals( expect=main.TRUE,
3601 actual=addAllResults,
3602 onpass="Set addAll correct",
3603 onfail="Set addAll was incorrect" )
3604
3605 main.step( "Distributed Set contains()" )
3606 containsResponses = []
3607 threads = []
3608 for i in main.activeNodes:
3609 t = main.Thread( target=main.CLIs[i].setTestGet,
3610 name="setContains-" + str( i ),
3611 args=[ onosSetName ],
3612 kwargs={ "values": addValue } )
3613 threads.append( t )
3614 t.start()
3615 for t in threads:
3616 t.join()
3617 # NOTE: This is the tuple
3618 containsResponses.append( t.result )
3619
3620 containsResults = main.TRUE
3621 for i in range( len( main.activeNodes ) ):
3622 if containsResponses[ i ] == main.ERROR:
3623 containsResults = main.FALSE
3624 else:
3625 containsResults = containsResults and\
3626 containsResponses[ i ][ 1 ]
3627 utilities.assert_equals( expect=main.TRUE,
3628 actual=containsResults,
3629 onpass="Set contains is functional",
3630 onfail="Set contains failed" )
3631
3632 main.step( "Distributed Set containsAll()" )
3633 containsAllResponses = []
3634 threads = []
3635 for i in main.activeNodes:
3636 t = main.Thread( target=main.CLIs[i].setTestGet,
3637 name="setContainsAll-" + str( i ),
3638 args=[ onosSetName ],
3639 kwargs={ "values": addAllValue } )
3640 threads.append( t )
3641 t.start()
3642 for t in threads:
3643 t.join()
3644 # NOTE: This is the tuple
3645 containsAllResponses.append( t.result )
3646
3647 containsAllResults = main.TRUE
3648 for i in range( len( main.activeNodes ) ):
3649 if containsResponses[ i ] == main.ERROR:
3650 containsResults = main.FALSE
3651 else:
3652 containsResults = containsResults and\
3653 containsResponses[ i ][ 1 ]
3654 utilities.assert_equals( expect=main.TRUE,
3655 actual=containsAllResults,
3656 onpass="Set containsAll is functional",
3657 onfail="Set containsAll failed" )
3658
3659 main.step( "Distributed Set remove()" )
3660 onosSet.remove( addValue )
3661 removeResponses = []
3662 threads = []
3663 for i in main.activeNodes:
3664 t = main.Thread( target=main.CLIs[i].setTestRemove,
3665 name="setTestRemove-" + str( i ),
3666 args=[ onosSetName, addValue ] )
3667 threads.append( t )
3668 t.start()
3669 for t in threads:
3670 t.join()
3671 removeResponses.append( t.result )
3672
3673 # main.TRUE = successfully changed the set
3674 # main.FALSE = action resulted in no change in set
3675 # main.ERROR - Some error in executing the function
3676 removeResults = main.TRUE
3677 for i in range( len( main.activeNodes ) ):
3678 if removeResponses[ i ] == main.TRUE:
3679 # All is well
3680 pass
3681 elif removeResponses[ i ] == main.FALSE:
3682 # not in set, probably fine
3683 pass
3684 elif removeResponses[ i ] == main.ERROR:
3685 # Error in execution
3686 removeResults = main.FALSE
3687 else:
3688 # unexpected result
3689 removeResults = main.FALSE
3690 if removeResults != main.TRUE:
3691 main.log.error( "Error executing set remove" )
3692
3693 # Check if set is still correct
3694 size = len( onosSet )
3695 getResponses = []
3696 threads = []
3697 for i in main.activeNodes:
3698 t = main.Thread( target=main.CLIs[i].setTestGet,
3699 name="setTestGet-" + str( i ),
3700 args=[ onosSetName ] )
3701 threads.append( t )
3702 t.start()
3703 for t in threads:
3704 t.join()
3705 getResponses.append( t.result )
3706 getResults = main.TRUE
3707 for i in range( len( main.activeNodes ) ):
3708 node = str( main.activeNodes[i] + 1 )
3709 if isinstance( getResponses[ i ], list):
3710 current = set( getResponses[ i ] )
3711 if len( current ) == len( getResponses[ i ] ):
3712 # no repeats
3713 if onosSet != current:
3714 main.log.error( "ONOS" + node +
3715 " has incorrect view" +
3716 " of set " + onosSetName + ":\n" +
3717 str( getResponses[ i ] ) )
3718 main.log.debug( "Expected: " + str( onosSet ) )
3719 main.log.debug( "Actual: " + str( current ) )
3720 getResults = main.FALSE
3721 else:
3722 # error, set is not a set
3723 main.log.error( "ONOS" + node +
3724 " has repeat elements in" +
3725 " set " + onosSetName + ":\n" +
3726 str( getResponses[ i ] ) )
3727 getResults = main.FALSE
3728 elif getResponses[ i ] == main.ERROR:
3729 getResults = main.FALSE
3730 sizeResponses = []
3731 threads = []
3732 for i in main.activeNodes:
3733 t = main.Thread( target=main.CLIs[i].setTestSize,
3734 name="setTestSize-" + str( i ),
3735 args=[ onosSetName ] )
3736 threads.append( t )
3737 t.start()
3738 for t in threads:
3739 t.join()
3740 sizeResponses.append( t.result )
3741 sizeResults = main.TRUE
3742 for i in range( len( main.activeNodes ) ):
3743 node = str( main.activeNodes[i] + 1 )
3744 if size != sizeResponses[ i ]:
3745 sizeResults = main.FALSE
3746 main.log.error( "ONOS" + node +
3747 " expected a size of " + str( size ) +
3748 " for set " + onosSetName +
3749 " but got " + str( sizeResponses[ i ] ) )
3750 removeResults = removeResults and getResults and sizeResults
3751 utilities.assert_equals( expect=main.TRUE,
3752 actual=removeResults,
3753 onpass="Set remove correct",
3754 onfail="Set remove was incorrect" )
3755
3756 main.step( "Distributed Set removeAll()" )
3757 onosSet.difference_update( addAllValue.split() )
3758 removeAllResponses = []
3759 threads = []
3760 try:
3761 for i in main.activeNodes:
3762 t = main.Thread( target=main.CLIs[i].setTestRemove,
3763 name="setTestRemoveAll-" + str( i ),
3764 args=[ onosSetName, addAllValue ] )
3765 threads.append( t )
3766 t.start()
3767 for t in threads:
3768 t.join()
3769 removeAllResponses.append( t.result )
3770 except Exception, e:
3771 main.log.exception(e)
3772
3773 # main.TRUE = successfully changed the set
3774 # main.FALSE = action resulted in no change in set
3775 # main.ERROR - Some error in executing the function
3776 removeAllResults = main.TRUE
3777 for i in range( len( main.activeNodes ) ):
3778 if removeAllResponses[ i ] == main.TRUE:
3779 # All is well
3780 pass
3781 elif removeAllResponses[ i ] == main.FALSE:
3782 # not in set, probably fine
3783 pass
3784 elif removeAllResponses[ i ] == main.ERROR:
3785 # Error in execution
3786 removeAllResults = main.FALSE
3787 else:
3788 # unexpected result
3789 removeAllResults = main.FALSE
3790 if removeAllResults != main.TRUE:
3791 main.log.error( "Error executing set removeAll" )
3792
3793 # Check if set is still correct
3794 size = len( onosSet )
3795 getResponses = []
3796 threads = []
3797 for i in main.activeNodes:
3798 t = main.Thread( target=main.CLIs[i].setTestGet,
3799 name="setTestGet-" + str( i ),
3800 args=[ onosSetName ] )
3801 threads.append( t )
3802 t.start()
3803 for t in threads:
3804 t.join()
3805 getResponses.append( t.result )
3806 getResults = main.TRUE
3807 for i in range( len( main.activeNodes ) ):
3808 node = str( main.activeNodes[i] + 1 )
3809 if isinstance( getResponses[ i ], list):
3810 current = set( getResponses[ i ] )
3811 if len( current ) == len( getResponses[ i ] ):
3812 # no repeats
3813 if onosSet != current:
3814 main.log.error( "ONOS" + node +
3815 " has incorrect view" +
3816 " of set " + onosSetName + ":\n" +
3817 str( getResponses[ i ] ) )
3818 main.log.debug( "Expected: " + str( onosSet ) )
3819 main.log.debug( "Actual: " + str( current ) )
3820 getResults = main.FALSE
3821 else:
3822 # error, set is not a set
3823 main.log.error( "ONOS" + node +
3824 " has repeat elements in" +
3825 " set " + onosSetName + ":\n" +
3826 str( getResponses[ i ] ) )
3827 getResults = main.FALSE
3828 elif getResponses[ i ] == main.ERROR:
3829 getResults = main.FALSE
3830 sizeResponses = []
3831 threads = []
3832 for i in main.activeNodes:
3833 t = main.Thread( target=main.CLIs[i].setTestSize,
3834 name="setTestSize-" + str( i ),
3835 args=[ onosSetName ] )
3836 threads.append( t )
3837 t.start()
3838 for t in threads:
3839 t.join()
3840 sizeResponses.append( t.result )
3841 sizeResults = main.TRUE
3842 for i in range( len( main.activeNodes ) ):
3843 node = str( main.activeNodes[i] + 1 )
3844 if size != sizeResponses[ i ]:
3845 sizeResults = main.FALSE
3846 main.log.error( "ONOS" + node +
3847 " expected a size of " + str( size ) +
3848 " for set " + onosSetName +
3849 " but got " + str( sizeResponses[ i ] ) )
3850 removeAllResults = removeAllResults and getResults and sizeResults
3851 utilities.assert_equals( expect=main.TRUE,
3852 actual=removeAllResults,
3853 onpass="Set removeAll correct",
3854 onfail="Set removeAll was incorrect" )
3855
3856 main.step( "Distributed Set addAll()" )
3857 onosSet.update( addAllValue.split() )
3858 addResponses = []
3859 threads = []
3860 for i in main.activeNodes:
3861 t = main.Thread( target=main.CLIs[i].setTestAdd,
3862 name="setTestAddAll-" + str( i ),
3863 args=[ onosSetName, addAllValue ] )
3864 threads.append( t )
3865 t.start()
3866 for t in threads:
3867 t.join()
3868 addResponses.append( t.result )
3869
3870 # main.TRUE = successfully changed the set
3871 # main.FALSE = action resulted in no change in set
3872 # main.ERROR - Some error in executing the function
3873 addAllResults = main.TRUE
3874 for i in range( len( main.activeNodes ) ):
3875 if addResponses[ i ] == main.TRUE:
3876 # All is well
3877 pass
3878 elif addResponses[ i ] == main.FALSE:
3879 # Already in set, probably fine
3880 pass
3881 elif addResponses[ i ] == main.ERROR:
3882 # Error in execution
3883 addAllResults = main.FALSE
3884 else:
3885 # unexpected result
3886 addAllResults = main.FALSE
3887 if addAllResults != main.TRUE:
3888 main.log.error( "Error executing set addAll" )
3889
3890 # Check if set is still correct
3891 size = len( onosSet )
3892 getResponses = []
3893 threads = []
3894 for i in main.activeNodes:
3895 t = main.Thread( target=main.CLIs[i].setTestGet,
3896 name="setTestGet-" + str( i ),
3897 args=[ onosSetName ] )
3898 threads.append( t )
3899 t.start()
3900 for t in threads:
3901 t.join()
3902 getResponses.append( t.result )
3903 getResults = main.TRUE
3904 for i in range( len( main.activeNodes ) ):
3905 node = str( main.activeNodes[i] + 1 )
3906 if isinstance( getResponses[ i ], list):
3907 current = set( getResponses[ i ] )
3908 if len( current ) == len( getResponses[ i ] ):
3909 # no repeats
3910 if onosSet != current:
3911 main.log.error( "ONOS" + node +
3912 " has incorrect view" +
3913 " of set " + onosSetName + ":\n" +
3914 str( getResponses[ i ] ) )
3915 main.log.debug( "Expected: " + str( onosSet ) )
3916 main.log.debug( "Actual: " + str( current ) )
3917 getResults = main.FALSE
3918 else:
3919 # error, set is not a set
3920 main.log.error( "ONOS" + node +
3921 " has repeat elements in" +
3922 " set " + onosSetName + ":\n" +
3923 str( getResponses[ i ] ) )
3924 getResults = main.FALSE
3925 elif getResponses[ i ] == main.ERROR:
3926 getResults = main.FALSE
3927 sizeResponses = []
3928 threads = []
3929 for i in main.activeNodes:
3930 t = main.Thread( target=main.CLIs[i].setTestSize,
3931 name="setTestSize-" + str( i ),
3932 args=[ onosSetName ] )
3933 threads.append( t )
3934 t.start()
3935 for t in threads:
3936 t.join()
3937 sizeResponses.append( t.result )
3938 sizeResults = main.TRUE
3939 for i in range( len( main.activeNodes ) ):
3940 node = str( main.activeNodes[i] + 1 )
3941 if size != sizeResponses[ i ]:
3942 sizeResults = main.FALSE
3943 main.log.error( "ONOS" + node +
3944 " expected a size of " + str( size ) +
3945 " for set " + onosSetName +
3946 " but got " + str( sizeResponses[ i ] ) )
3947 addAllResults = addAllResults and getResults and sizeResults
3948 utilities.assert_equals( expect=main.TRUE,
3949 actual=addAllResults,
3950 onpass="Set addAll correct",
3951 onfail="Set addAll was incorrect" )
3952
3953 main.step( "Distributed Set clear()" )
3954 onosSet.clear()
3955 clearResponses = []
3956 threads = []
3957 for i in main.activeNodes:
3958 t = main.Thread( target=main.CLIs[i].setTestRemove,
3959 name="setTestClear-" + str( i ),
3960 args=[ onosSetName, " "], # Values doesn't matter
3961 kwargs={ "clear": True } )
3962 threads.append( t )
3963 t.start()
3964 for t in threads:
3965 t.join()
3966 clearResponses.append( t.result )
3967
3968 # main.TRUE = successfully changed the set
3969 # main.FALSE = action resulted in no change in set
3970 # main.ERROR - Some error in executing the function
3971 clearResults = main.TRUE
3972 for i in range( len( main.activeNodes ) ):
3973 if clearResponses[ i ] == main.TRUE:
3974 # All is well
3975 pass
3976 elif clearResponses[ i ] == main.FALSE:
3977 # Nothing set, probably fine
3978 pass
3979 elif clearResponses[ i ] == main.ERROR:
3980 # Error in execution
3981 clearResults = main.FALSE
3982 else:
3983 # unexpected result
3984 clearResults = main.FALSE
3985 if clearResults != main.TRUE:
3986 main.log.error( "Error executing set clear" )
3987
3988 # Check if set is still correct
3989 size = len( onosSet )
3990 getResponses = []
3991 threads = []
3992 for i in main.activeNodes:
3993 t = main.Thread( target=main.CLIs[i].setTestGet,
3994 name="setTestGet-" + str( i ),
3995 args=[ onosSetName ] )
3996 threads.append( t )
3997 t.start()
3998 for t in threads:
3999 t.join()
4000 getResponses.append( t.result )
4001 getResults = main.TRUE
4002 for i in range( len( main.activeNodes ) ):
4003 node = str( main.activeNodes[i] + 1 )
4004 if isinstance( getResponses[ i ], list):
4005 current = set( getResponses[ i ] )
4006 if len( current ) == len( getResponses[ i ] ):
4007 # no repeats
4008 if onosSet != current:
4009 main.log.error( "ONOS" + node +
4010 " has incorrect view" +
4011 " of set " + onosSetName + ":\n" +
4012 str( getResponses[ i ] ) )
4013 main.log.debug( "Expected: " + str( onosSet ) )
4014 main.log.debug( "Actual: " + str( current ) )
4015 getResults = main.FALSE
4016 else:
4017 # error, set is not a set
4018 main.log.error( "ONOS" + node +
4019 " has repeat elements in" +
4020 " set " + onosSetName + ":\n" +
4021 str( getResponses[ i ] ) )
4022 getResults = main.FALSE
4023 elif getResponses[ i ] == main.ERROR:
4024 getResults = main.FALSE
4025 sizeResponses = []
4026 threads = []
4027 for i in main.activeNodes:
4028 t = main.Thread( target=main.CLIs[i].setTestSize,
4029 name="setTestSize-" + str( i ),
4030 args=[ onosSetName ] )
4031 threads.append( t )
4032 t.start()
4033 for t in threads:
4034 t.join()
4035 sizeResponses.append( t.result )
4036 sizeResults = main.TRUE
4037 for i in range( len( main.activeNodes ) ):
4038 node = str( main.activeNodes[i] + 1 )
4039 if size != sizeResponses[ i ]:
4040 sizeResults = main.FALSE
4041 main.log.error( "ONOS" + node +
4042 " expected a size of " + str( size ) +
4043 " for set " + onosSetName +
4044 " but got " + str( sizeResponses[ i ] ) )
4045 clearResults = clearResults and getResults and sizeResults
4046 utilities.assert_equals( expect=main.TRUE,
4047 actual=clearResults,
4048 onpass="Set clear correct",
4049 onfail="Set clear was incorrect" )
4050
4051 main.step( "Distributed Set addAll()" )
4052 onosSet.update( addAllValue.split() )
4053 addResponses = []
4054 threads = []
4055 for i in main.activeNodes:
4056 t = main.Thread( target=main.CLIs[i].setTestAdd,
4057 name="setTestAddAll-" + str( i ),
4058 args=[ onosSetName, addAllValue ] )
4059 threads.append( t )
4060 t.start()
4061 for t in threads:
4062 t.join()
4063 addResponses.append( t.result )
4064
4065 # main.TRUE = successfully changed the set
4066 # main.FALSE = action resulted in no change in set
4067 # main.ERROR - Some error in executing the function
4068 addAllResults = main.TRUE
4069 for i in range( len( main.activeNodes ) ):
4070 if addResponses[ i ] == main.TRUE:
4071 # All is well
4072 pass
4073 elif addResponses[ i ] == main.FALSE:
4074 # Already in set, probably fine
4075 pass
4076 elif addResponses[ i ] == main.ERROR:
4077 # Error in execution
4078 addAllResults = main.FALSE
4079 else:
4080 # unexpected result
4081 addAllResults = main.FALSE
4082 if addAllResults != main.TRUE:
4083 main.log.error( "Error executing set addAll" )
4084
4085 # Check if set is still correct
4086 size = len( onosSet )
4087 getResponses = []
4088 threads = []
4089 for i in main.activeNodes:
4090 t = main.Thread( target=main.CLIs[i].setTestGet,
4091 name="setTestGet-" + str( i ),
4092 args=[ onosSetName ] )
4093 threads.append( t )
4094 t.start()
4095 for t in threads:
4096 t.join()
4097 getResponses.append( t.result )
4098 getResults = main.TRUE
4099 for i in range( len( main.activeNodes ) ):
4100 node = str( main.activeNodes[i] + 1 )
4101 if isinstance( getResponses[ i ], list):
4102 current = set( getResponses[ i ] )
4103 if len( current ) == len( getResponses[ i ] ):
4104 # no repeats
4105 if onosSet != current:
4106 main.log.error( "ONOS" + node +
4107 " has incorrect view" +
4108 " of set " + onosSetName + ":\n" +
4109 str( getResponses[ i ] ) )
4110 main.log.debug( "Expected: " + str( onosSet ) )
4111 main.log.debug( "Actual: " + str( current ) )
4112 getResults = main.FALSE
4113 else:
4114 # error, set is not a set
4115 main.log.error( "ONOS" + node +
4116 " has repeat elements in" +
4117 " set " + onosSetName + ":\n" +
4118 str( getResponses[ i ] ) )
4119 getResults = main.FALSE
4120 elif getResponses[ i ] == main.ERROR:
4121 getResults = main.FALSE
4122 sizeResponses = []
4123 threads = []
4124 for i in main.activeNodes:
4125 t = main.Thread( target=main.CLIs[i].setTestSize,
4126 name="setTestSize-" + str( i ),
4127 args=[ onosSetName ] )
4128 threads.append( t )
4129 t.start()
4130 for t in threads:
4131 t.join()
4132 sizeResponses.append( t.result )
4133 sizeResults = main.TRUE
4134 for i in range( len( main.activeNodes ) ):
4135 node = str( main.activeNodes[i] + 1 )
4136 if size != sizeResponses[ i ]:
4137 sizeResults = main.FALSE
4138 main.log.error( "ONOS" + node +
4139 " expected a size of " + str( size ) +
4140 " for set " + onosSetName +
4141 " but got " + str( sizeResponses[ i ] ) )
4142 addAllResults = addAllResults and getResults and sizeResults
4143 utilities.assert_equals( expect=main.TRUE,
4144 actual=addAllResults,
4145 onpass="Set addAll correct",
4146 onfail="Set addAll was incorrect" )
4147
4148 main.step( "Distributed Set retain()" )
4149 onosSet.intersection_update( retainValue.split() )
4150 retainResponses = []
4151 threads = []
4152 for i in main.activeNodes:
4153 t = main.Thread( target=main.CLIs[i].setTestRemove,
4154 name="setTestRetain-" + str( i ),
4155 args=[ onosSetName, retainValue ],
4156 kwargs={ "retain": True } )
4157 threads.append( t )
4158 t.start()
4159 for t in threads:
4160 t.join()
4161 retainResponses.append( t.result )
4162
4163 # main.TRUE = successfully changed the set
4164 # main.FALSE = action resulted in no change in set
4165 # main.ERROR - Some error in executing the function
4166 retainResults = main.TRUE
4167 for i in range( len( main.activeNodes ) ):
4168 if retainResponses[ i ] == main.TRUE:
4169 # All is well
4170 pass
4171 elif retainResponses[ i ] == main.FALSE:
4172 # Already in set, probably fine
4173 pass
4174 elif retainResponses[ i ] == main.ERROR:
4175 # Error in execution
4176 retainResults = main.FALSE
4177 else:
4178 # unexpected result
4179 retainResults = main.FALSE
4180 if retainResults != main.TRUE:
4181 main.log.error( "Error executing set retain" )
4182
4183 # Check if set is still correct
4184 size = len( onosSet )
4185 getResponses = []
4186 threads = []
4187 for i in main.activeNodes:
4188 t = main.Thread( target=main.CLIs[i].setTestGet,
4189 name="setTestGet-" + str( i ),
4190 args=[ onosSetName ] )
4191 threads.append( t )
4192 t.start()
4193 for t in threads:
4194 t.join()
4195 getResponses.append( t.result )
4196 getResults = main.TRUE
4197 for i in range( len( main.activeNodes ) ):
4198 node = str( main.activeNodes[i] + 1 )
4199 if isinstance( getResponses[ i ], list):
4200 current = set( getResponses[ i ] )
4201 if len( current ) == len( getResponses[ i ] ):
4202 # no repeats
4203 if onosSet != current:
4204 main.log.error( "ONOS" + node +
4205 " has incorrect view" +
4206 " of set " + onosSetName + ":\n" +
4207 str( getResponses[ i ] ) )
4208 main.log.debug( "Expected: " + str( onosSet ) )
4209 main.log.debug( "Actual: " + str( current ) )
4210 getResults = main.FALSE
4211 else:
4212 # error, set is not a set
4213 main.log.error( "ONOS" + node +
4214 " has repeat elements in" +
4215 " set " + onosSetName + ":\n" +
4216 str( getResponses[ i ] ) )
4217 getResults = main.FALSE
4218 elif getResponses[ i ] == main.ERROR:
4219 getResults = main.FALSE
4220 sizeResponses = []
4221 threads = []
4222 for i in main.activeNodes:
4223 t = main.Thread( target=main.CLIs[i].setTestSize,
4224 name="setTestSize-" + str( i ),
4225 args=[ onosSetName ] )
4226 threads.append( t )
4227 t.start()
4228 for t in threads:
4229 t.join()
4230 sizeResponses.append( t.result )
4231 sizeResults = main.TRUE
4232 for i in range( len( main.activeNodes ) ):
4233 node = str( main.activeNodes[i] + 1 )
4234 if size != sizeResponses[ i ]:
4235 sizeResults = main.FALSE
4236 main.log.error( "ONOS" + node + " expected a size of " +
4237 str( size ) + " for set " + onosSetName +
4238 " but got " + str( sizeResponses[ i ] ) )
4239 retainResults = retainResults and getResults and sizeResults
4240 utilities.assert_equals( expect=main.TRUE,
4241 actual=retainResults,
4242 onpass="Set retain correct",
4243 onfail="Set retain was incorrect" )
4244
4245 # Transactional maps
4246 main.step( "Partitioned Transactional maps put" )
4247 tMapValue = "Testing"
4248 numKeys = 100
4249 putResult = True
4250 node = main.activeNodes[0]
4251 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4252 if putResponses and len( putResponses ) == 100:
4253 for i in putResponses:
4254 if putResponses[ i ][ 'value' ] != tMapValue:
4255 putResult = False
4256 else:
4257 putResult = False
4258 if not putResult:
4259 main.log.debug( "Put response values: " + str( putResponses ) )
4260 utilities.assert_equals( expect=True,
4261 actual=putResult,
4262 onpass="Partitioned Transactional Map put successful",
4263 onfail="Partitioned Transactional Map put values are incorrect" )
4264
4265 main.step( "Partitioned Transactional maps get" )
4266 getCheck = True
4267 for n in range( 1, numKeys + 1 ):
4268 getResponses = []
4269 threads = []
4270 valueCheck = True
4271 for i in main.activeNodes:
4272 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4273 name="TMap-get-" + str( i ),
4274 args=[ "Key" + str( n ) ] )
4275 threads.append( t )
4276 t.start()
4277 for t in threads:
4278 t.join()
4279 getResponses.append( t.result )
4280 for node in getResponses:
4281 if node != tMapValue:
4282 valueCheck = False
4283 if not valueCheck:
4284 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4285 main.log.warn( getResponses )
4286 getCheck = getCheck and valueCheck
4287 utilities.assert_equals( expect=True,
4288 actual=getCheck,
4289 onpass="Partitioned Transactional Map get values were correct",
4290 onfail="Partitioned Transactional Map values incorrect" )