blob: 3ecbe4ca23999f079e6cf28dc5ae6e60112f8b11 [file] [log] [blame]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic scaling of the cluster size.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: The scaling case.
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAscaling:
28
29 def __init__( self ):
30 self.default = ''
31
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import time
        import os
        import re
        main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cap the requested cluster size at the number of nodes the bench
        # actually knows about
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # Import test-suite dependencies; abort the whole test if they are
        # missing since every later case relies on main.HA / main.Server
        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
            from tests.HA.HAscaling.dependencies.Server import Server
            main.Server = Server()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect per-node CLI and node components created by the framework;
        # stop at the first index that does not exist
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )

        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                 onpass="Set cell successfull",
                                 onfail="Failled to set cell" )

        main.step( "Verify connectivity to cell" )
        verifyResult = main.ONOSbench.verifyCell()
        utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
                                 onpass="Verify cell passed",
                                 onfail="Failled to verify cell" )

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        # The scaling test serves the cluster metadata file over HTTP so the
        # cluster size can be changed later without reinstalling ONOS
        main.step( "Setup server for cluster metadata file" )
        port = 8000
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=port,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )

        main.step( "Generate initial metadata file" )
        # 'scaling' is a comma-separated list of cluster sizes; an "e" suffix
        # means partitions are sized equally for that scale step
        main.scaling = main.params['scaling'].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop(0)
        main.log.debug( scale)
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal)
        main.numCtrls = int( re.search( "\d+", scale ).group(0) )
        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAscaling"
        plotName = "Plot-HA"
        index = "0"
        # Wiki markup embedding the Jenkins plot in the test report page
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        # Back up onos-service before editing it below, so it can be restored
        # after packaging
        main.step( "Copying backup config files" )
        path = "~/onos/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 path,
                                 path + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        ip = main.ONOSbench.getIpAddr()
        metaFile = "cluster.json"
        # Forward slashes are escaped because the string is spliced into a
        # sed replacement below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject a JAVA_OPTS export right after the shebang line of
        # onos-service; {{ }} escapes literal braces in str.format
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        handle.expect( "\$" )
        main.log.debug( repr( handle.before ) )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        # Install on ALL bench nodes; nodes beyond the current scale are
        # installed but not started ("-nf") so they can join later
        # NOTE(review): this indexes main.nodes with range( maxNodes ) —
        # looks like it assumes len( main.nodes ) == maxNodes; confirm
        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for i in range( main.ONOSbench.maxNodes ):
            node = main.nodes[i]
            options = "-f"
            if i >= main.numCtrls:
                options = "-nf"  # Don't start more than the current scale
            tmpResult = main.ONOSbench.onosInstall( options=options,
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            path + ".backup",
                            path,
                            direction="to" )

        # Poll each running node, retrying the whole sweep once
        # NOTE(review): the inner loop reuses the outer loop variable `i`;
        # works because the outer value is not read afterwards, but fragile
        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for i in range( main.numCtrls ):
                node = main.nodes[i]
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # NOTE(review): main.log.step looks like a typo for main.step —
        # confirm against the TestON logger API
        main.log.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        # On failure, dump each node's non-ACTIVE OSGi components for triage
        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        # Verify every node agrees on the app name <-> app id mapping
        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
396
397 def CASE2( self, main ):
398 """
399 Assign devices to controllers
400 """
401 import re
402 assert main.numCtrls, "main.numCtrls not defined"
403 assert main, "main not defined"
404 assert utilities.assert_equals, "utilities.assert_equals not defined"
405 assert main.CLIs, "main.CLIs not defined"
406 assert main.nodes, "main.nodes not defined"
407
408 main.case( "Assigning devices to controllers" )
409 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
410 "and check that an ONOS node becomes the " +\
411 "master of the device."
412 main.step( "Assign switches to controllers" )
413
414 ipList = []
415 for i in range( main.ONOSbench.maxNodes ):
416 ipList.append( main.nodes[ i ].ip_address )
417 swList = []
418 for i in range( 1, 29 ):
419 swList.append( "s" + str( i ) )
420 main.Mininet1.assignSwController( sw=swList, ip=ipList )
421
422 mastershipCheck = main.TRUE
423 for i in range( 1, 29 ):
424 response = main.Mininet1.getSwController( "s" + str( i ) )
425 try:
426 main.log.info( str( response ) )
427 except Exception:
428 main.log.info( repr( response ) )
429 for node in main.nodes:
430 if re.search( "tcp:" + node.ip_address, response ):
431 mastershipCheck = mastershipCheck and main.TRUE
432 else:
433 main.log.error( "Error, node " + node.ip_address + " is " +
434 "not in the list of controllers s" +
435 str( i ) + " is connecting to." )
436 mastershipCheck = main.FALSE
437 utilities.assert_equals(
438 expect=main.TRUE,
439 actual=mastershipCheck,
440 onpass="Switch mastership assigned correctly",
441 onfail="Switches not assigned correctly to controllers" )
442
443 def CASE21( self, main ):
444 """
445 Assign mastership to controllers
446 """
447 import time
448 assert main.numCtrls, "main.numCtrls not defined"
449 assert main, "main not defined"
450 assert utilities.assert_equals, "utilities.assert_equals not defined"
451 assert main.CLIs, "main.CLIs not defined"
452 assert main.nodes, "main.nodes not defined"
453
454 main.case( "Assigning Controller roles for switches" )
455 main.caseExplanation = "Check that ONOS is connected to each " +\
456 "device. Then manually assign" +\
457 " mastership to specific ONOS nodes using" +\
458 " 'device-role'"
459 main.step( "Assign mastership of switches to specific controllers" )
460 # Manually assign mastership to the controller we want
461 roleCall = main.TRUE
462
463 ipList = [ ]
464 deviceList = []
465 onosCli = main.CLIs[ main.activeNodes[0] ]
466 try:
467 # Assign mastership to specific controllers. This assignment was
468 # determined for a 7 node cluser, but will work with any sized
469 # cluster
470 for i in range( 1, 29 ): # switches 1 through 28
471 # set up correct variables:
472 if i == 1:
473 c = 0
474 ip = main.nodes[ c ].ip_address # ONOS1
475 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
476 elif i == 2:
477 c = 1 % main.numCtrls
478 ip = main.nodes[ c ].ip_address # ONOS2
479 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
480 elif i == 3:
481 c = 1 % main.numCtrls
482 ip = main.nodes[ c ].ip_address # ONOS2
483 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
484 elif i == 4:
485 c = 3 % main.numCtrls
486 ip = main.nodes[ c ].ip_address # ONOS4
487 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
488 elif i == 5:
489 c = 2 % main.numCtrls
490 ip = main.nodes[ c ].ip_address # ONOS3
491 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
492 elif i == 6:
493 c = 2 % main.numCtrls
494 ip = main.nodes[ c ].ip_address # ONOS3
495 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
496 elif i == 7:
497 c = 5 % main.numCtrls
498 ip = main.nodes[ c ].ip_address # ONOS6
499 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
500 elif i >= 8 and i <= 17:
501 c = 4 % main.numCtrls
502 ip = main.nodes[ c ].ip_address # ONOS5
503 dpid = '3' + str( i ).zfill( 3 )
504 deviceId = onosCli.getDevice( dpid ).get( 'id' )
505 elif i >= 18 and i <= 27:
506 c = 6 % main.numCtrls
507 ip = main.nodes[ c ].ip_address # ONOS7
508 dpid = '6' + str( i ).zfill( 3 )
509 deviceId = onosCli.getDevice( dpid ).get( 'id' )
510 elif i == 28:
511 c = 0
512 ip = main.nodes[ c ].ip_address # ONOS1
513 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
514 else:
515 main.log.error( "You didn't write an else statement for " +
516 "switch s" + str( i ) )
517 roleCall = main.FALSE
518 # Assign switch
519 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
520 # TODO: make this controller dynamic
521 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
522 ipList.append( ip )
523 deviceList.append( deviceId )
524 except ( AttributeError, AssertionError ):
525 main.log.exception( "Something is wrong with ONOS device view" )
526 main.log.info( onosCli.devices() )
527 utilities.assert_equals(
528 expect=main.TRUE,
529 actual=roleCall,
530 onpass="Re-assigned switch mastership to designated controller",
531 onfail="Something wrong with deviceRole calls" )
532
533 main.step( "Check mastership was correctly assigned" )
534 roleCheck = main.TRUE
535 # NOTE: This is due to the fact that device mastership change is not
536 # atomic and is actually a multi step process
537 time.sleep( 5 )
538 for i in range( len( ipList ) ):
539 ip = ipList[i]
540 deviceId = deviceList[i]
541 # Check assignment
542 master = onosCli.getRole( deviceId ).get( 'master' )
543 if ip in master:
544 roleCheck = roleCheck and main.TRUE
545 else:
546 roleCheck = roleCheck and main.FALSE
547 main.log.error( "Error, controller " + ip + " is not" +
548 " master " + "of device " +
549 str( deviceId ) + ". Master is " +
550 repr( master ) + "." )
551 utilities.assert_equals(
552 expect=main.TRUE,
553 actual=roleCheck,
554 onpass="Switches were successfully reassigned to designated " +
555 "controller",
556 onfail="Switches were not successfully reassigned" )
557
558 def CASE3( self, main ):
559 """
560 Assign intents
561 """
562 import time
563 import json
564 assert main.numCtrls, "main.numCtrls not defined"
565 assert main, "main not defined"
566 assert utilities.assert_equals, "utilities.assert_equals not defined"
567 assert main.CLIs, "main.CLIs not defined"
568 assert main.nodes, "main.nodes not defined"
569 try:
570 labels
571 except NameError:
572 main.log.error( "labels not defined, setting to []" )
573 labels = []
574 try:
575 data
576 except NameError:
577 main.log.error( "data not defined, setting to []" )
578 data = []
579 # NOTE: we must reinstall intents until we have a persistant intent
580 # datastore!
581 main.case( "Adding host Intents" )
582 main.caseExplanation = "Discover hosts by using pingall then " +\
583 "assign predetermined host-to-host intents." +\
584 " After installation, check that the intent" +\
585 " is distributed to all nodes and the state" +\
586 " is INSTALLED"
587
588 # install onos-app-fwd
589 main.step( "Install reactive forwarding app" )
590 onosCli = main.CLIs[ main.activeNodes[0] ]
591 installResults = onosCli.activateApp( "org.onosproject.fwd" )
592 utilities.assert_equals( expect=main.TRUE, actual=installResults,
593 onpass="Install fwd successful",
594 onfail="Install fwd failed" )
595
596 main.step( "Check app ids" )
597 appCheck = main.TRUE
598 threads = []
599 for i in main.activeNodes:
600 t = main.Thread( target=main.CLIs[i].appToIDCheck,
601 name="appToIDCheck-" + str( i ),
602 args=[] )
603 threads.append( t )
604 t.start()
605
606 for t in threads:
607 t.join()
608 appCheck = appCheck and t.result
609 if appCheck != main.TRUE:
610 main.log.warn( onosCli.apps() )
611 main.log.warn( onosCli.appIDs() )
612 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
613 onpass="App Ids seem to be correct",
614 onfail="Something is wrong with app Ids" )
615
616 main.step( "Discovering Hosts( Via pingall for now )" )
617 # FIXME: Once we have a host discovery mechanism, use that instead
618 # REACTIVE FWD test
619 pingResult = main.FALSE
620 passMsg = "Reactive Pingall test passed"
621 time1 = time.time()
622 pingResult = main.Mininet1.pingall()
623 time2 = time.time()
624 if not pingResult:
625 main.log.warn("First pingall failed. Trying again...")
626 pingResult = main.Mininet1.pingall()
627 passMsg += " on the second try"
628 utilities.assert_equals(
629 expect=main.TRUE,
630 actual=pingResult,
631 onpass= passMsg,
632 onfail="Reactive Pingall failed, " +
633 "one or more ping pairs failed" )
634 main.log.info( "Time for pingall: %2f seconds" %
635 ( time2 - time1 ) )
636 # timeout for fwd flows
637 time.sleep( 11 )
638 # uninstall onos-app-fwd
639 main.step( "Uninstall reactive forwarding app" )
640 node = main.activeNodes[0]
641 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
642 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
643 onpass="Uninstall fwd successful",
644 onfail="Uninstall fwd failed" )
645
646 main.step( "Check app ids" )
647 threads = []
648 appCheck2 = main.TRUE
649 for i in main.activeNodes:
650 t = main.Thread( target=main.CLIs[i].appToIDCheck,
651 name="appToIDCheck-" + str( i ),
652 args=[] )
653 threads.append( t )
654 t.start()
655
656 for t in threads:
657 t.join()
658 appCheck2 = appCheck2 and t.result
659 if appCheck2 != main.TRUE:
660 node = main.activeNodes[0]
661 main.log.warn( main.CLIs[node].apps() )
662 main.log.warn( main.CLIs[node].appIDs() )
663 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
664 onpass="App Ids seem to be correct",
665 onfail="Something is wrong with app Ids" )
666
667 main.step( "Add host intents via cli" )
668 intentIds = []
669 # TODO: move the host numbers to params
670 # Maybe look at all the paths we ping?
671 intentAddResult = True
672 hostResult = main.TRUE
673 for i in range( 8, 18 ):
674 main.log.info( "Adding host intent between h" + str( i ) +
675 " and h" + str( i + 10 ) )
676 host1 = "00:00:00:00:00:" + \
677 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
678 host2 = "00:00:00:00:00:" + \
679 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
680 # NOTE: getHost can return None
681 host1Dict = onosCli.getHost( host1 )
682 host2Dict = onosCli.getHost( host2 )
683 host1Id = None
684 host2Id = None
685 if host1Dict and host2Dict:
686 host1Id = host1Dict.get( 'id', None )
687 host2Id = host2Dict.get( 'id', None )
688 if host1Id and host2Id:
689 nodeNum = ( i % len( main.activeNodes ) )
690 node = main.activeNodes[nodeNum]
691 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
692 if tmpId:
693 main.log.info( "Added intent with id: " + tmpId )
694 intentIds.append( tmpId )
695 else:
696 main.log.error( "addHostIntent returned: " +
697 repr( tmpId ) )
698 else:
699 main.log.error( "Error, getHost() failed for h" + str( i ) +
700 " and/or h" + str( i + 10 ) )
701 node = main.activeNodes[0]
702 hosts = main.CLIs[node].hosts()
703 main.log.warn( "Hosts output: " )
704 try:
705 main.log.warn( json.dumps( json.loads( hosts ),
706 sort_keys=True,
707 indent=4,
708 separators=( ',', ': ' ) ) )
709 except ( ValueError, TypeError ):
710 main.log.warn( repr( hosts ) )
711 hostResult = main.FALSE
712 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
713 onpass="Found a host id for each host",
714 onfail="Error looking up host ids" )
715
716 intentStart = time.time()
717 onosIds = onosCli.getAllIntentsId()
718 main.log.info( "Submitted intents: " + str( intentIds ) )
719 main.log.info( "Intents in ONOS: " + str( onosIds ) )
720 for intent in intentIds:
721 if intent in onosIds:
722 pass # intent submitted is in onos
723 else:
724 intentAddResult = False
725 if intentAddResult:
726 intentStop = time.time()
727 else:
728 intentStop = None
729 # Print the intent states
730 intents = onosCli.intents()
731 intentStates = []
732 installedCheck = True
733 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
734 count = 0
735 try:
736 for intent in json.loads( intents ):
737 state = intent.get( 'state', None )
738 if "INSTALLED" not in state:
739 installedCheck = False
740 intentId = intent.get( 'id', None )
741 intentStates.append( ( intentId, state ) )
742 except ( ValueError, TypeError ):
743 main.log.exception( "Error parsing intents" )
744 # add submitted intents not in the store
745 tmplist = [ i for i, s in intentStates ]
746 missingIntents = False
747 for i in intentIds:
748 if i not in tmplist:
749 intentStates.append( ( i, " - " ) )
750 missingIntents = True
751 intentStates.sort()
752 for i, s in intentStates:
753 count += 1
754 main.log.info( "%-6s%-15s%-15s" %
755 ( str( count ), str( i ), str( s ) ) )
756 leaders = onosCli.leaders()
757 try:
758 missing = False
759 if leaders:
760 parsedLeaders = json.loads( leaders )
761 main.log.warn( json.dumps( parsedLeaders,
762 sort_keys=True,
763 indent=4,
764 separators=( ',', ': ' ) ) )
765 # check for all intent partitions
766 topics = []
767 for i in range( 14 ):
768 topics.append( "intent-partition-" + str( i ) )
769 main.log.debug( topics )
770 ONOStopics = [ j['topic'] for j in parsedLeaders ]
771 for topic in topics:
772 if topic not in ONOStopics:
773 main.log.error( "Error: " + topic +
774 " not in leaders" )
775 missing = True
776 else:
777 main.log.error( "leaders() returned None" )
778 except ( ValueError, TypeError ):
779 main.log.exception( "Error parsing leaders" )
780 main.log.error( repr( leaders ) )
781 # Check all nodes
782 if missing:
783 for i in main.activeNodes:
784 response = main.CLIs[i].leaders( jsonFormat=False)
785 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
786 str( response ) )
787
788 partitions = onosCli.partitions()
789 try:
790 if partitions :
791 parsedPartitions = json.loads( partitions )
792 main.log.warn( json.dumps( parsedPartitions,
793 sort_keys=True,
794 indent=4,
795 separators=( ',', ': ' ) ) )
796 # TODO check for a leader in all paritions
797 # TODO check for consistency among nodes
798 else:
799 main.log.error( "partitions() returned None" )
800 except ( ValueError, TypeError ):
801 main.log.exception( "Error parsing partitions" )
802 main.log.error( repr( partitions ) )
803 pendingMap = onosCli.pendingMap()
804 try:
805 if pendingMap :
806 parsedPending = json.loads( pendingMap )
807 main.log.warn( json.dumps( parsedPending,
808 sort_keys=True,
809 indent=4,
810 separators=( ',', ': ' ) ) )
811 # TODO check something here?
812 else:
813 main.log.error( "pendingMap() returned None" )
814 except ( ValueError, TypeError ):
815 main.log.exception( "Error parsing pending map" )
816 main.log.error( repr( pendingMap ) )
817
818 intentAddResult = bool( intentAddResult and not missingIntents and
819 installedCheck )
820 if not intentAddResult:
821 main.log.error( "Error in pushing host intents to ONOS" )
822
823 main.step( "Intent Anti-Entropy dispersion" )
824 for j in range(100):
825 correct = True
826 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
827 for i in main.activeNodes:
828 onosIds = []
829 ids = main.CLIs[i].getAllIntentsId()
830 onosIds.append( ids )
831 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
832 str( sorted( onosIds ) ) )
833 if sorted( ids ) != sorted( intentIds ):
834 main.log.warn( "Set of intent IDs doesn't match" )
835 correct = False
836 break
837 else:
838 intents = json.loads( main.CLIs[i].intents() )
839 for intent in intents:
840 if intent[ 'state' ] != "INSTALLED":
841 main.log.warn( "Intent " + intent[ 'id' ] +
842 " is " + intent[ 'state' ] )
843 correct = False
844 break
845 if correct:
846 break
847 else:
848 time.sleep(1)
849 if not intentStop:
850 intentStop = time.time()
851 global gossipTime
852 gossipTime = intentStop - intentStart
853 main.log.info( "It took about " + str( gossipTime ) +
854 " seconds for all intents to appear in each node" )
855 append = False
856 title = "Gossip Intents"
857 count = 1
858 while append is False:
859 curTitle = title + str( count )
860 if curTitle not in labels:
861 labels.append( curTitle )
862 data.append( str( gossipTime ) )
863 append = True
864 else:
865 count += 1
866 gossipPeriod = int( main.params['timers']['gossip'] )
867 maxGossipTime = gossipPeriod * len( main.activeNodes )
868 utilities.assert_greater_equals(
869 expect=maxGossipTime, actual=gossipTime,
870 onpass="ECM anti-entropy for intents worked within " +
871 "expected time",
872 onfail="Intent ECM anti-entropy took too long. " +
873 "Expected time:{}, Actual time:{}".format( maxGossipTime,
874 gossipTime ) )
875 if gossipTime <= maxGossipTime:
876 intentAddResult = True
877
878 if not intentAddResult or "key" in pendingMap:
879 import time
880 installedCheck = True
881 main.log.info( "Sleeping 60 seconds to see if intents are found" )
882 time.sleep( 60 )
883 onosIds = onosCli.getAllIntentsId()
884 main.log.info( "Submitted intents: " + str( intentIds ) )
885 main.log.info( "Intents in ONOS: " + str( onosIds ) )
886 # Print the intent states
887 intents = onosCli.intents()
888 intentStates = []
889 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
890 count = 0
891 try:
892 for intent in json.loads( intents ):
893 # Iter through intents of a node
894 state = intent.get( 'state', None )
895 if "INSTALLED" not in state:
896 installedCheck = False
897 intentId = intent.get( 'id', None )
898 intentStates.append( ( intentId, state ) )
899 except ( ValueError, TypeError ):
900 main.log.exception( "Error parsing intents" )
901 # add submitted intents not in the store
902 tmplist = [ i for i, s in intentStates ]
903 for i in intentIds:
904 if i not in tmplist:
905 intentStates.append( ( i, " - " ) )
906 intentStates.sort()
907 for i, s in intentStates:
908 count += 1
909 main.log.info( "%-6s%-15s%-15s" %
910 ( str( count ), str( i ), str( s ) ) )
911 leaders = onosCli.leaders()
912 try:
913 missing = False
914 if leaders:
915 parsedLeaders = json.loads( leaders )
916 main.log.warn( json.dumps( parsedLeaders,
917 sort_keys=True,
918 indent=4,
919 separators=( ',', ': ' ) ) )
920 # check for all intent partitions
921 # check for election
922 topics = []
923 for i in range( 14 ):
924 topics.append( "intent-partition-" + str( i ) )
925 # FIXME: this should only be after we start the app
926 topics.append( "org.onosproject.election" )
927 main.log.debug( topics )
928 ONOStopics = [ j['topic'] for j in parsedLeaders ]
929 for topic in topics:
930 if topic not in ONOStopics:
931 main.log.error( "Error: " + topic +
932 " not in leaders" )
933 missing = True
934 else:
935 main.log.error( "leaders() returned None" )
936 except ( ValueError, TypeError ):
937 main.log.exception( "Error parsing leaders" )
938 main.log.error( repr( leaders ) )
939 # Check all nodes
940 if missing:
941 for i in main.activeNodes:
942 node = main.CLIs[i]
943 response = node.leaders( jsonFormat=False)
944 main.log.warn( str( node.name ) + " leaders output: \n" +
945 str( response ) )
946
947 partitions = onosCli.partitions()
948 try:
949 if partitions :
950 parsedPartitions = json.loads( partitions )
951 main.log.warn( json.dumps( parsedPartitions,
952 sort_keys=True,
953 indent=4,
954 separators=( ',', ': ' ) ) )
955 # TODO check for a leader in all paritions
956 # TODO check for consistency among nodes
957 else:
958 main.log.error( "partitions() returned None" )
959 except ( ValueError, TypeError ):
960 main.log.exception( "Error parsing partitions" )
961 main.log.error( repr( partitions ) )
962 pendingMap = onosCli.pendingMap()
963 try:
964 if pendingMap :
965 parsedPending = json.loads( pendingMap )
966 main.log.warn( json.dumps( parsedPending,
967 sort_keys=True,
968 indent=4,
969 separators=( ',', ': ' ) ) )
970 # TODO check something here?
971 else:
972 main.log.error( "pendingMap() returned None" )
973 except ( ValueError, TypeError ):
974 main.log.exception( "Error parsing pending map" )
975 main.log.error( repr( pendingMap ) )
976
    def CASE4( self, main ):
        """
        Ping across added host intents

        Polls intent state until every intent reports INSTALLED (up to
        ~40 one-second retries), then pings h8..h17 against h18..h27
        across the host intents added in CASE3. On ping failure, dumps
        intents, topic leadership, partitions and the intent pending map
        for debugging; if intents never all installed, waits 60 seconds,
        re-dumps state and retries the pings once more.

        Reads (TestON framework state): main.CLIs, main.activeNodes,
        main.Mininet1, main.numCtrls, main.nodes.
        """
        import json
        import time
        # Sanity-check that the framework state set up by CASE1 exists.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"

        # All state queries in this case go through the first active node's CLI.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until all intents are INSTALLED, or give up after ~40 seconds.
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    # NOTE(review): if 'state' is missing, state is None and
                    # the membership test below raises TypeError, which aborts
                    # this pass via the except clause — confirm intended.
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # CASE3 wires host intents between h<i> and h<i+10> for i in 8..17.
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Pings failed: dump the intent store for post-mortem debugging.
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        # Verify every intent partition topic has a leader elected.
        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # NOTE(review): the 14-partition count is hard-coded here and
                # in the re-check below — verify against cluster config.
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Log raw (non-JSON) leaders output from every active node.
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If the earlier polling never saw all intents INSTALLED, give the
        # cluster another minute, re-dump state, and retry the pings once.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # Some topic had no leader; log per-node leaders output.
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1258
    def CASE5( self, main ):
        """
        Reading state of ONOS

        Gathers mastership, intents, flows, OF tables and topology
        (devices/hosts/ports/links/clusters) from every active node in
        parallel, checks that the per-node views agree with each other
        and with Mininet, and caches the agreed-upon state in the module
        globals mastershipState, intentState, flowState and flows so the
        post-scaling cases can compare against it. Also kicks off long
        background pings between the host pairs named in main.params.

        Reads (TestON framework state): main.CLIs, main.activeNodes,
        main.Mininet1, main.Mininet2, main.params, main.numCtrls.
        """
        import json
        import time
        # Sanity-check that the framework state set up by CASE1 exists.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        # Globals hold the "known good" cluster state for later cases.
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node concurrently; one thread per CLI.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error".
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Consistent means every node's raw output equals the first node's.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Inconsistent: pretty-print each node's view for comparison.
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Consistent and readable: cache for post-scaling comparison.
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() on the raw JSON strings sorts characters,
        # not intents — equal sets of intents serialized in different order
        # would compare unequal here. Confirm this strictness is intended.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            # Dump each node's intents only where it differs from the last.
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Cache the agreed-upon intents for post-scaling comparison.
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Only flow COUNTS are compared, not flow contents.
        # NOTE(review): entries in ONOSFlowsJson can be None (set above on
        # parse failure), and len( None ) raises TypeError here — confirm
        # whether that crash path is acceptable when flowsResults is False.
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        # Pull OpenFlow tables directly from all 28 Mininet switches.
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Fire-and-forget background pings between the host pairs named in
        # main.params[ 'PING' ]; they keep traffic flowing across the
        # scaling event in later cases.
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Gather devices, hosts, ports, links and clusters from every node,
        # one threaded query batch per data type. Results are appended in
        # thread order, so index k corresponds to main.activeNodes[k].
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if consistentClustersResult != main.TRUE:
            main.log.debug( clusters )
        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): this loop iterates node IDS (main.activeNodes) but
        # uses them to index the result lists, unlike the range(len(...))
        # loops above. That is only equivalent while activeNodes is the
        # contiguous range 0..n-1 (as set in CASE6) — verify if activeNodes
        # can ever be sparse.
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                        mnSwitches,
                        json.loads( devices[ controller ] ),
                        json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1814
1815 def CASE6( self, main ):
1816 """
1817 The Scaling case.
1818 """
1819 import time
1820 import re
1821 assert main.numCtrls, "main.numCtrls not defined"
1822 assert main, "main not defined"
1823 assert utilities.assert_equals, "utilities.assert_equals not defined"
1824 assert main.CLIs, "main.CLIs not defined"
1825 assert main.nodes, "main.nodes not defined"
1826 try:
1827 labels
1828 except NameError:
1829 main.log.error( "labels not defined, setting to []" )
1830 global labels
1831 labels = []
1832 try:
1833 data
1834 except NameError:
1835 main.log.error( "data not defined, setting to []" )
1836 global data
1837 data = []
1838
1839 main.case( "Restart entire ONOS cluster" )
1840
1841 main.step( "Checking ONOS Logs for errors" )
1842 for i in main.activeNodes:
1843 node = main.nodes[i]
1844 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1845 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1846
1847 """
1848 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1849 modify cluster.json file appropriately
1850 install/deactivate node as needed
1851 """
1852
1853 try:
1854 prevNodes = main.activeNodes
1855 scale = main.scaling.pop(0)
1856 if "e" in scale:
1857 equal = True
1858 else:
1859 equal = False
1860 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1861 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1862 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1863 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1864 onpass="New cluster metadata file generated",
1865 onfail="Failled to generate new metadata file" )
1866 time.sleep( 5 ) # Give time for nodes to read new file
1867 except IndexError:
1868 main.cleanup()
1869 main.exit()
1870
1871 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1872 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1873
1874 main.step( "Start new nodes" ) # OR stop old nodes?
1875 started = main.TRUE
1876 for i in newNodes:
1877 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1878 utilities.assert_equals( expect=main.TRUE, actual=started,
1879 onpass="ONOS started",
1880 onfail="ONOS start NOT successful" )
1881
1882 main.step( "Checking if ONOS is up yet" )
1883 for i in range( 2 ):
1884 onosIsupResult = main.TRUE
1885 for i in main.activeNodes:
1886 node = main.nodes[i]
1887 started = main.ONOSbench.isup( node.ip_address )
1888 if not started:
1889 main.log.error( node.name + " didn't start!" )
1890 onosIsupResult = onosIsupResult and started
1891 if onosIsupResult == main.TRUE:
1892 break
1893 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1894 onpass="ONOS started",
1895 onfail="ONOS start NOT successful" )
1896
1897 main.log.step( "Starting ONOS CLI sessions" )
1898 cliResults = main.TRUE
1899 threads = []
1900 for i in main.activeNodes:
1901 t = main.Thread( target=main.CLIs[i].startOnosCli,
1902 name="startOnosCli-" + str( i ),
1903 args=[main.nodes[i].ip_address] )
1904 threads.append( t )
1905 t.start()
1906
1907 for t in threads:
1908 t.join()
1909 cliResults = cliResults and t.result
1910 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1911 onpass="ONOS cli started",
1912 onfail="ONOS clis did not start" )
1913
1914 main.step( "Checking ONOS nodes" )
1915 nodeResults = utilities.retry( main.HA.nodesCheck,
1916 False,
1917 args=[main.activeNodes],
1918 attempts=5 )
1919 utilities.assert_equals( expect=True, actual=nodeResults,
1920 onpass="Nodes check successful",
1921 onfail="Nodes check NOT successful" )
1922
1923 for i in range( 10 ):
1924 ready = True
1925 for i in main.activeNodes:
1926 cli = main.CLIs[i]
1927 output = cli.summary()
1928 if not output:
1929 ready = False
1930 if ready:
1931 break
1932 time.sleep( 30 )
1933 utilities.assert_equals( expect=True, actual=ready,
1934 onpass="ONOS summary command succeded",
1935 onfail="ONOS summary command failed" )
1936 if not ready:
1937 main.cleanup()
1938 main.exit()
1939
1940 # Rerun for election on new nodes
1941 runResults = main.TRUE
1942 for i in main.activeNodes:
1943 cli = main.CLIs[i]
1944 run = cli.electionTestRun()
1945 if run != main.TRUE:
1946 main.log.error( "Error running for election on " + cli.name )
1947 runResults = runResults and run
1948 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1949 onpass="Reran for election",
1950 onfail="Failed to rerun for election" )
1951
1952 # TODO: Make this configurable
1953 time.sleep( 60 )
1954 for node in main.activeNodes:
1955 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1956 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1959 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1960
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that distributed state survived the scale event:
        - every switch still has a master
        - device roles and intents are consistent across all active nodes
        - intents and switch flow tables are unchanged from the snapshots
          taken before scaling ( the globals 'intentState' and 'flows',
          presumably saved by an earlier case — see the NOTE below )
        - leadership election still reports a single agreed-upon leader
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel and AND the results together
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        # Collect each node's view of device mastership in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles query failed or returned an error string
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should report the identical mastership view
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump every node's full view for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted intent strings so ordering differences don't count
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # one table row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            # Log a histogram of intent states for this node
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump each node's full intent list for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        # maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            # intentState is a global saved before scaling; NameError if unset
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            # what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length: fall back to a per-intent membership check
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        FlowTables = main.TRUE
        # Compare each switch's flow table to the pre-scaling snapshot in the
        # global 'flows' list — presumably saved by an earlier case; verify
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        # Ask each node who the election-app leader is
        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # Every node must name the same single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2269
    def CASE8( self, main ):
        """
        Compare topo.

        Pulls devices, hosts, ports, links and clusters from every active
        ONOS node ( retrying for up to ~60s / 3 attempts to let gossip
        converge ), compares each node's view against the Mininet topology,
        checks host attachment points against the hard-coded obelisk
        mappings, then verifies cross-node consistency of hosts and
        strongly-connected clusters and that exactly one SCC exists.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Fetch and parse hosts from every node in parallel
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host must have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Fetch ports from every node in parallel
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Fetch links from every node in parallel
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Fetch SCC clusters from every node in parallel
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query on every node failed, retry instead of comparing
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each controller's view against Mininet's ground truth
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                            mnHosts,
                            hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # no hosts discovered yet is not treated as a mismatch
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    # given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                 controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # The topology must have converged within 2 polling attempts to pass
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump non-ACTIVE karaf components from each node to help debug
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2686
2687 def CASE9( self, main ):
2688 """
2689 Link s3-s28 down
2690 """
2691 import time
2692 assert main.numCtrls, "main.numCtrls not defined"
2693 assert main, "main not defined"
2694 assert utilities.assert_equals, "utilities.assert_equals not defined"
2695 assert main.CLIs, "main.CLIs not defined"
2696 assert main.nodes, "main.nodes not defined"
2697 # NOTE: You should probably run a topology check after this
2698
2699 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2700
2701 description = "Turn off a link to ensure that Link Discovery " +\
2702 "is working properly"
2703 main.case( description )
2704
2705 main.step( "Kill Link between s3 and s28" )
2706 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2707 main.log.info( "Waiting " + str( linkSleep ) +
2708 " seconds for link down to be discovered" )
2709 time.sleep( linkSleep )
2710 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2711 onpass="Link down successful",
2712 onfail="Failed to bring link down" )
2713 # TODO do some sort of check here
2714
2715 def CASE10( self, main ):
2716 """
2717 Link s3-s28 up
2718 """
2719 import time
2720 assert main.numCtrls, "main.numCtrls not defined"
2721 assert main, "main not defined"
2722 assert utilities.assert_equals, "utilities.assert_equals not defined"
2723 assert main.CLIs, "main.CLIs not defined"
2724 assert main.nodes, "main.nodes not defined"
2725 # NOTE: You should probably run a topology check after this
2726
2727 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2728
2729 description = "Restore a link to ensure that Link Discovery is " + \
2730 "working properly"
2731 main.case( description )
2732
2733 main.step( "Bring link between s3 and s28 back up" )
2734 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2735 main.log.info( "Waiting " + str( linkSleep ) +
2736 " seconds for link up to be discovered" )
2737 time.sleep( linkSleep )
2738 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2739 onpass="Link up successful",
2740 onfail="Failed to bring link up" )
2741 # TODO do some sort of check here
2742
2743 def CASE11( self, main ):
2744 """
2745 Switch Down
2746 """
2747 # NOTE: You should probably run a topology check after this
2748 import time
2749 assert main.numCtrls, "main.numCtrls not defined"
2750 assert main, "main not defined"
2751 assert utilities.assert_equals, "utilities.assert_equals not defined"
2752 assert main.CLIs, "main.CLIs not defined"
2753 assert main.nodes, "main.nodes not defined"
2754
2755 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2756
2757 description = "Killing a switch to ensure it is discovered correctly"
2758 onosCli = main.CLIs[ main.activeNodes[0] ]
2759 main.case( description )
2760 switch = main.params[ 'kill' ][ 'switch' ]
2761 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2762
2763 # TODO: Make this switch parameterizable
2764 main.step( "Kill " + switch )
2765 main.log.info( "Deleting " + switch )
2766 main.Mininet1.delSwitch( switch )
2767 main.log.info( "Waiting " + str( switchSleep ) +
2768 " seconds for switch down to be discovered" )
2769 time.sleep( switchSleep )
2770 device = onosCli.getDevice( dpid=switchDPID )
2771 # Peek at the deleted switch
2772 main.log.warn( str( device ) )
2773 result = main.FALSE
2774 if device and device[ 'available' ] is False:
2775 result = main.TRUE
2776 utilities.assert_equals( expect=main.TRUE, actual=result,
2777 onpass="Kill switch successful",
2778 onfail="Failed to kill switch?" )
2779
2780 def CASE12( self, main ):
2781 """
2782 Switch Up
2783 """
2784 # NOTE: You should probably run a topology check after this
2785 import time
2786 assert main.numCtrls, "main.numCtrls not defined"
2787 assert main, "main not defined"
2788 assert utilities.assert_equals, "utilities.assert_equals not defined"
2789 assert main.CLIs, "main.CLIs not defined"
2790 assert main.nodes, "main.nodes not defined"
2791
2792 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2793 switch = main.params[ 'kill' ][ 'switch' ]
2794 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2795 links = main.params[ 'kill' ][ 'links' ].split()
2796 onosCli = main.CLIs[ main.activeNodes[0] ]
2797 description = "Adding a switch to ensure it is discovered correctly"
2798 main.case( description )
2799
2800 main.step( "Add back " + switch )
2801 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2802 for peer in links:
2803 main.Mininet1.addLink( switch, peer )
2804 ipList = [ node.ip_address for node in main.nodes ]
2805 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2806 main.log.info( "Waiting " + str( switchSleep ) +
2807 " seconds for switch up to be discovered" )
2808 time.sleep( switchSleep )
2809 device = onosCli.getDevice( dpid=switchDPID )
2810 # Peek at the deleted switch
2811 main.log.warn( str( device ) )
2812 result = main.FALSE
2813 if device and device[ 'available' ]:
2814 result = main.TRUE
2815 utilities.assert_equals( expect=main.TRUE, actual=result,
2816 onpass="add switch successful",
2817 onfail="Failed to add switch?" )
2818
2819 def CASE13( self, main ):
2820 """
2821 Clean up
2822 """
2823 assert main.numCtrls, "main.numCtrls not defined"
2824 assert main, "main not defined"
2825 assert utilities.assert_equals, "utilities.assert_equals not defined"
2826 assert main.CLIs, "main.CLIs not defined"
2827 assert main.nodes, "main.nodes not defined"
2828
2829 main.case( "Test Cleanup" )
2830 main.step( "Killing tcpdumps" )
2831 main.Mininet2.stopTcpdump()
2832
2833 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2834 main.step( "Copying MN pcap and ONOS log files to test station" )
2835 # NOTE: MN Pcap file is being saved to logdir.
2836 # We scp this file as MN and TestON aren't necessarily the same vm
2837
2838 # FIXME: To be replaced with a Jenkin's post script
2839 # TODO: Load these from params
2840 # NOTE: must end in /
2841 logFolder = "/opt/onos/log/"
2842 logFiles = [ "karaf.log", "karaf.log.1" ]
2843 # NOTE: must end in /
2844 for f in logFiles:
2845 for node in main.nodes:
2846 dstName = main.logdir + "/" + node.name + "-" + f
2847 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2848 logFolder + f, dstName )
2849 # std*.log's
2850 # NOTE: must end in /
2851 logFolder = "/opt/onos/var/"
2852 logFiles = [ "stderr.log", "stdout.log" ]
2853 # NOTE: must end in /
2854 for f in logFiles:
2855 for node in main.nodes:
2856 dstName = main.logdir + "/" + node.name + "-" + f
2857 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2858 logFolder + f, dstName )
2859 else:
2860 main.log.debug( "skipping saving log files" )
2861
2862 main.step( "Stopping Mininet" )
2863 mnResult = main.Mininet1.stopNet()
2864 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2865 onpass="Mininet stopped",
2866 onfail="MN cleanup NOT successful" )
2867
2868 main.step( "Checking ONOS Logs for errors" )
2869 for node in main.nodes:
2870 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2871 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2872
2873 try:
2874 timerLog = open( main.logdir + "/Timers.csv", 'w')
2875 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2876 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2877 timerLog.close()
2878 except NameError, e:
2879 main.log.exception(e)
2880
2881 main.step( "Stopping webserver" )
2882 status = main.Server.stop( )
2883 utilities.assert_equals( expect=main.TRUE, actual=status,
2884 onpass="Stop Server",
2885 onfail="Failled to stop SimpleHTTPServer" )
2886 del main.Server
2887
2888 def CASE14( self, main ):
2889 """
2890 start election app on all onos nodes
2891 """
2892 import time
2893 assert main.numCtrls, "main.numCtrls not defined"
2894 assert main, "main not defined"
2895 assert utilities.assert_equals, "utilities.assert_equals not defined"
2896 assert main.CLIs, "main.CLIs not defined"
2897 assert main.nodes, "main.nodes not defined"
2898
2899 main.case("Start Leadership Election app")
2900 main.step( "Install leadership election app" )
2901 onosCli = main.CLIs[ main.activeNodes[0] ]
2902 appResult = onosCli.activateApp( "org.onosproject.election" )
2903 utilities.assert_equals(
2904 expect=main.TRUE,
2905 actual=appResult,
2906 onpass="Election app installed",
2907 onfail="Something went wrong with installing Leadership election" )
2908
2909 main.step( "Run for election on each node" )
2910 for i in main.activeNodes:
2911 main.CLIs[i].electionTestRun()
2912 time.sleep(5)
2913 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2914 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2915 utilities.assert_equals(
2916 expect=True,
2917 actual=sameResult,
2918 onpass="All nodes see the same leaderboards",
2919 onfail="Inconsistent leaderboards" )
2920
2921 if sameResult:
2922 leader = leaders[ 0 ][ 0 ]
2923 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2924 correctLeader = True
2925 else:
2926 correctLeader = False
2927 main.step( "First node was elected leader" )
2928 utilities.assert_equals(
2929 expect=True,
2930 actual=correctLeader,
2931 onpass="Correct leader was elected",
2932 onfail="Incorrect leader" )
2933
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
            15.1 Run election on each node
            15.2 Check that each node has the same leaders and candidates
            15.3 Find current leader and withdraw
            15.4 Check that a new node was elected leader
            15.5 Check that that new leader was the candidate of old leader
            15.6 Run for election on old leader
            15.7 Check that oldLeader is a candidate, and leader if only 1 node
            15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
            withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run election after restarts since being a candidate
        #       is not persistent
        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            # With a single node, withdrawing the leader leaves nobody to lead
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app the remaining steps cannot run
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; the first entry is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP against our nodes
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: only runs if the loop never hit 'break'
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means no node currently leads; only OK when we expected it
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries on the old board: cannot predict successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: Parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3107
3108 def CASE16( self, main ):
3109 """
3110 Install Distributed Primitives app
3111 """
3112 import time
3113 assert main.numCtrls, "main.numCtrls not defined"
3114 assert main, "main not defined"
3115 assert utilities.assert_equals, "utilities.assert_equals not defined"
3116 assert main.CLIs, "main.CLIs not defined"
3117 assert main.nodes, "main.nodes not defined"
3118
3119 # Variables for the distributed primitives tests
3120 global pCounterName
3121 global pCounterValue
3122 global onosSet
3123 global onosSetName
3124 pCounterName = "TestON-Partitions"
3125 pCounterValue = 0
3126 onosSet = set([])
3127 onosSetName = "TestON-set"
3128
3129 description = "Install Primitives app"
3130 main.case( description )
3131 main.step( "Install Primitives app" )
3132 appName = "org.onosproject.distributedprimitives"
3133 node = main.activeNodes[0]
3134 appResults = main.CLIs[node].activateApp( appName )
3135 utilities.assert_equals( expect=main.TRUE,
3136 actual=appResults,
3137 onpass="Primitives app activated",
3138 onfail="Primitives app not activated" )
3139 time.sleep( 5 ) # To allow all nodes to activate
3140
3141 def CASE17( self, main ):
3142 """
3143 Check for basic functionality with distributed primitives
3144 """
3145 # Make sure variables are defined/set
3146 assert main.numCtrls, "main.numCtrls not defined"
3147 assert main, "main not defined"
3148 assert utilities.assert_equals, "utilities.assert_equals not defined"
3149 assert main.CLIs, "main.CLIs not defined"
3150 assert main.nodes, "main.nodes not defined"
3151 assert pCounterName, "pCounterName not defined"
3152 assert onosSetName, "onosSetName not defined"
3153 # NOTE: assert fails if value is 0/None/Empty/False
3154 try:
3155 pCounterValue
3156 except NameError:
3157 main.log.error( "pCounterValue not defined, setting to 0" )
3158 pCounterValue = 0
3159 try:
3160 onosSet
3161 except NameError:
3162 main.log.error( "onosSet not defined, setting to empty Set" )
3163 onosSet = set([])
3164 # Variables for the distributed primitives tests. These are local only
3165 addValue = "a"
3166 addAllValue = "a b c d e f"
3167 retainValue = "c d e f"
3168
3169 description = "Check for basic functionality with distributed " +\
3170 "primitives"
3171 main.case( description )
3172 main.caseExplanation = "Test the methods of the distributed " +\
3173 "primitives (counters and sets) throught the cli"
3174 # DISTRIBUTED ATOMIC COUNTERS
3175 # Partitioned counters
3176 main.step( "Increment then get a default counter on each node" )
3177 pCounters = []
3178 threads = []
3179 addedPValues = []
3180 for i in main.activeNodes:
3181 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3182 name="counterAddAndGet-" + str( i ),
3183 args=[ pCounterName ] )
3184 pCounterValue += 1
3185 addedPValues.append( pCounterValue )
3186 threads.append( t )
3187 t.start()
3188
3189 for t in threads:
3190 t.join()
3191 pCounters.append( t.result )
3192 # Check that counter incremented numController times
3193 pCounterResults = True
3194 for i in addedPValues:
3195 tmpResult = i in pCounters
3196 pCounterResults = pCounterResults and tmpResult
3197 if not tmpResult:
3198 main.log.error( str( i ) + " is not in partitioned "
3199 "counter incremented results" )
3200 utilities.assert_equals( expect=True,
3201 actual=pCounterResults,
3202 onpass="Default counter incremented",
3203 onfail="Error incrementing default" +
3204 " counter" )
3205
3206 main.step( "Get then Increment a default counter on each node" )
3207 pCounters = []
3208 threads = []
3209 addedPValues = []
3210 for i in main.activeNodes:
3211 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3212 name="counterGetAndAdd-" + str( i ),
3213 args=[ pCounterName ] )
3214 addedPValues.append( pCounterValue )
3215 pCounterValue += 1
3216 threads.append( t )
3217 t.start()
3218
3219 for t in threads:
3220 t.join()
3221 pCounters.append( t.result )
3222 # Check that counter incremented numController times
3223 pCounterResults = True
3224 for i in addedPValues:
3225 tmpResult = i in pCounters
3226 pCounterResults = pCounterResults and tmpResult
3227 if not tmpResult:
3228 main.log.error( str( i ) + " is not in partitioned "
3229 "counter incremented results" )
3230 utilities.assert_equals( expect=True,
3231 actual=pCounterResults,
3232 onpass="Default counter incremented",
3233 onfail="Error incrementing default" +
3234 " counter" )
3235
3236 main.step( "Counters we added have the correct values" )
3237 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3238 utilities.assert_equals( expect=main.TRUE,
3239 actual=incrementCheck,
3240 onpass="Added counters are correct",
3241 onfail="Added counters are incorrect" )
3242
3243 main.step( "Add -8 to then get a default counter on each node" )
3244 pCounters = []
3245 threads = []
3246 addedPValues = []
3247 for i in main.activeNodes:
3248 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3249 name="counterIncrement-" + str( i ),
3250 args=[ pCounterName ],
3251 kwargs={ "delta": -8 } )
3252 pCounterValue += -8
3253 addedPValues.append( pCounterValue )
3254 threads.append( t )
3255 t.start()
3256
3257 for t in threads:
3258 t.join()
3259 pCounters.append( t.result )
3260 # Check that counter incremented numController times
3261 pCounterResults = True
3262 for i in addedPValues:
3263 tmpResult = i in pCounters
3264 pCounterResults = pCounterResults and tmpResult
3265 if not tmpResult:
3266 main.log.error( str( i ) + " is not in partitioned "
3267 "counter incremented results" )
3268 utilities.assert_equals( expect=True,
3269 actual=pCounterResults,
3270 onpass="Default counter incremented",
3271 onfail="Error incrementing default" +
3272 " counter" )
3273
3274 main.step( "Add 5 to then get a default counter on each node" )
3275 pCounters = []
3276 threads = []
3277 addedPValues = []
3278 for i in main.activeNodes:
3279 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3280 name="counterIncrement-" + str( i ),
3281 args=[ pCounterName ],
3282 kwargs={ "delta": 5 } )
3283 pCounterValue += 5
3284 addedPValues.append( pCounterValue )
3285 threads.append( t )
3286 t.start()
3287
3288 for t in threads:
3289 t.join()
3290 pCounters.append( t.result )
3291 # Check that counter incremented numController times
3292 pCounterResults = True
3293 for i in addedPValues:
3294 tmpResult = i in pCounters
3295 pCounterResults = pCounterResults and tmpResult
3296 if not tmpResult:
3297 main.log.error( str( i ) + " is not in partitioned "
3298 "counter incremented results" )
3299 utilities.assert_equals( expect=True,
3300 actual=pCounterResults,
3301 onpass="Default counter incremented",
3302 onfail="Error incrementing default" +
3303 " counter" )
3304
3305 main.step( "Get then add 5 to a default counter on each node" )
3306 pCounters = []
3307 threads = []
3308 addedPValues = []
3309 for i in main.activeNodes:
3310 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3311 name="counterIncrement-" + str( i ),
3312 args=[ pCounterName ],
3313 kwargs={ "delta": 5 } )
3314 addedPValues.append( pCounterValue )
3315 pCounterValue += 5
3316 threads.append( t )
3317 t.start()
3318
3319 for t in threads:
3320 t.join()
3321 pCounters.append( t.result )
3322 # Check that counter incremented numController times
3323 pCounterResults = True
3324 for i in addedPValues:
3325 tmpResult = i in pCounters
3326 pCounterResults = pCounterResults and tmpResult
3327 if not tmpResult:
3328 main.log.error( str( i ) + " is not in partitioned "
3329 "counter incremented results" )
3330 utilities.assert_equals( expect=True,
3331 actual=pCounterResults,
3332 onpass="Default counter incremented",
3333 onfail="Error incrementing default" +
3334 " counter" )
3335
3336 main.step( "Counters we added have the correct values" )
3337 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3338 utilities.assert_equals( expect=main.TRUE,
3339 actual=incrementCheck,
3340 onpass="Added counters are correct",
3341 onfail="Added counters are incorrect" )
3342
3343 # DISTRIBUTED SETS
3344 main.step( "Distributed Set get" )
3345 size = len( onosSet )
3346 getResponses = []
3347 threads = []
3348 for i in main.activeNodes:
3349 t = main.Thread( target=main.CLIs[i].setTestGet,
3350 name="setTestGet-" + str( i ),
3351 args=[ onosSetName ] )
3352 threads.append( t )
3353 t.start()
3354 for t in threads:
3355 t.join()
3356 getResponses.append( t.result )
3357
3358 getResults = main.TRUE
3359 for i in range( len( main.activeNodes ) ):
3360 node = str( main.activeNodes[i] + 1 )
3361 if isinstance( getResponses[ i ], list):
3362 current = set( getResponses[ i ] )
3363 if len( current ) == len( getResponses[ i ] ):
3364 # no repeats
3365 if onosSet != current:
3366 main.log.error( "ONOS" + node +
3367 " has incorrect view" +
3368 " of set " + onosSetName + ":\n" +
3369 str( getResponses[ i ] ) )
3370 main.log.debug( "Expected: " + str( onosSet ) )
3371 main.log.debug( "Actual: " + str( current ) )
3372 getResults = main.FALSE
3373 else:
3374 # error, set is not a set
3375 main.log.error( "ONOS" + node +
3376 " has repeat elements in" +
3377 " set " + onosSetName + ":\n" +
3378 str( getResponses[ i ] ) )
3379 getResults = main.FALSE
3380 elif getResponses[ i ] == main.ERROR:
3381 getResults = main.FALSE
3382 utilities.assert_equals( expect=main.TRUE,
3383 actual=getResults,
3384 onpass="Set elements are correct",
3385 onfail="Set elements are incorrect" )
3386
3387 main.step( "Distributed Set size" )
3388 sizeResponses = []
3389 threads = []
3390 for i in main.activeNodes:
3391 t = main.Thread( target=main.CLIs[i].setTestSize,
3392 name="setTestSize-" + str( i ),
3393 args=[ onosSetName ] )
3394 threads.append( t )
3395 t.start()
3396 for t in threads:
3397 t.join()
3398 sizeResponses.append( t.result )
3399
3400 sizeResults = main.TRUE
3401 for i in range( len( main.activeNodes ) ):
3402 node = str( main.activeNodes[i] + 1 )
3403 if size != sizeResponses[ i ]:
3404 sizeResults = main.FALSE
3405 main.log.error( "ONOS" + node +
3406 " expected a size of " + str( size ) +
3407 " for set " + onosSetName +
3408 " but got " + str( sizeResponses[ i ] ) )
3409 utilities.assert_equals( expect=main.TRUE,
3410 actual=sizeResults,
3411 onpass="Set sizes are correct",
3412 onfail="Set sizes are incorrect" )
3413
3414 main.step( "Distributed Set add()" )
3415 onosSet.add( addValue )
3416 addResponses = []
3417 threads = []
3418 for i in main.activeNodes:
3419 t = main.Thread( target=main.CLIs[i].setTestAdd,
3420 name="setTestAdd-" + str( i ),
3421 args=[ onosSetName, addValue ] )
3422 threads.append( t )
3423 t.start()
3424 for t in threads:
3425 t.join()
3426 addResponses.append( t.result )
3427
3428 # main.TRUE = successfully changed the set
3429 # main.FALSE = action resulted in no change in set
3430 # main.ERROR - Some error in executing the function
3431 addResults = main.TRUE
3432 for i in range( len( main.activeNodes ) ):
3433 if addResponses[ i ] == main.TRUE:
3434 # All is well
3435 pass
3436 elif addResponses[ i ] == main.FALSE:
3437 # Already in set, probably fine
3438 pass
3439 elif addResponses[ i ] == main.ERROR:
3440 # Error in execution
3441 addResults = main.FALSE
3442 else:
3443 # unexpected result
3444 addResults = main.FALSE
3445 if addResults != main.TRUE:
3446 main.log.error( "Error executing set add" )
3447
3448 # Check if set is still correct
3449 size = len( onosSet )
3450 getResponses = []
3451 threads = []
3452 for i in main.activeNodes:
3453 t = main.Thread( target=main.CLIs[i].setTestGet,
3454 name="setTestGet-" + str( i ),
3455 args=[ onosSetName ] )
3456 threads.append( t )
3457 t.start()
3458 for t in threads:
3459 t.join()
3460 getResponses.append( t.result )
3461 getResults = main.TRUE
3462 for i in range( len( main.activeNodes ) ):
3463 node = str( main.activeNodes[i] + 1 )
3464 if isinstance( getResponses[ i ], list):
3465 current = set( getResponses[ i ] )
3466 if len( current ) == len( getResponses[ i ] ):
3467 # no repeats
3468 if onosSet != current:
3469 main.log.error( "ONOS" + node + " has incorrect view" +
3470 " of set " + onosSetName + ":\n" +
3471 str( getResponses[ i ] ) )
3472 main.log.debug( "Expected: " + str( onosSet ) )
3473 main.log.debug( "Actual: " + str( current ) )
3474 getResults = main.FALSE
3475 else:
3476 # error, set is not a set
3477 main.log.error( "ONOS" + node + " has repeat elements in" +
3478 " set " + onosSetName + ":\n" +
3479 str( getResponses[ i ] ) )
3480 getResults = main.FALSE
3481 elif getResponses[ i ] == main.ERROR:
3482 getResults = main.FALSE
3483 sizeResponses = []
3484 threads = []
3485 for i in main.activeNodes:
3486 t = main.Thread( target=main.CLIs[i].setTestSize,
3487 name="setTestSize-" + str( i ),
3488 args=[ onosSetName ] )
3489 threads.append( t )
3490 t.start()
3491 for t in threads:
3492 t.join()
3493 sizeResponses.append( t.result )
3494 sizeResults = main.TRUE
3495 for i in range( len( main.activeNodes ) ):
3496 node = str( main.activeNodes[i] + 1 )
3497 if size != sizeResponses[ i ]:
3498 sizeResults = main.FALSE
3499 main.log.error( "ONOS" + node +
3500 " expected a size of " + str( size ) +
3501 " for set " + onosSetName +
3502 " but got " + str( sizeResponses[ i ] ) )
3503 addResults = addResults and getResults and sizeResults
3504 utilities.assert_equals( expect=main.TRUE,
3505 actual=addResults,
3506 onpass="Set add correct",
3507 onfail="Set add was incorrect" )
3508
3509 main.step( "Distributed Set addAll()" )
3510 onosSet.update( addAllValue.split() )
3511 addResponses = []
3512 threads = []
3513 for i in main.activeNodes:
3514 t = main.Thread( target=main.CLIs[i].setTestAdd,
3515 name="setTestAddAll-" + str( i ),
3516 args=[ onosSetName, addAllValue ] )
3517 threads.append( t )
3518 t.start()
3519 for t in threads:
3520 t.join()
3521 addResponses.append( t.result )
3522
3523 # main.TRUE = successfully changed the set
3524 # main.FALSE = action resulted in no change in set
3525 # main.ERROR - Some error in executing the function
3526 addAllResults = main.TRUE
3527 for i in range( len( main.activeNodes ) ):
3528 if addResponses[ i ] == main.TRUE:
3529 # All is well
3530 pass
3531 elif addResponses[ i ] == main.FALSE:
3532 # Already in set, probably fine
3533 pass
3534 elif addResponses[ i ] == main.ERROR:
3535 # Error in execution
3536 addAllResults = main.FALSE
3537 else:
3538 # unexpected result
3539 addAllResults = main.FALSE
3540 if addAllResults != main.TRUE:
3541 main.log.error( "Error executing set addAll" )
3542
3543 # Check if set is still correct
3544 size = len( onosSet )
3545 getResponses = []
3546 threads = []
3547 for i in main.activeNodes:
3548 t = main.Thread( target=main.CLIs[i].setTestGet,
3549 name="setTestGet-" + str( i ),
3550 args=[ onosSetName ] )
3551 threads.append( t )
3552 t.start()
3553 for t in threads:
3554 t.join()
3555 getResponses.append( t.result )
3556 getResults = main.TRUE
3557 for i in range( len( main.activeNodes ) ):
3558 node = str( main.activeNodes[i] + 1 )
3559 if isinstance( getResponses[ i ], list):
3560 current = set( getResponses[ i ] )
3561 if len( current ) == len( getResponses[ i ] ):
3562 # no repeats
3563 if onosSet != current:
3564 main.log.error( "ONOS" + node +
3565 " has incorrect view" +
3566 " of set " + onosSetName + ":\n" +
3567 str( getResponses[ i ] ) )
3568 main.log.debug( "Expected: " + str( onosSet ) )
3569 main.log.debug( "Actual: " + str( current ) )
3570 getResults = main.FALSE
3571 else:
3572 # error, set is not a set
3573 main.log.error( "ONOS" + node +
3574 " has repeat elements in" +
3575 " set " + onosSetName + ":\n" +
3576 str( getResponses[ i ] ) )
3577 getResults = main.FALSE
3578 elif getResponses[ i ] == main.ERROR:
3579 getResults = main.FALSE
3580 sizeResponses = []
3581 threads = []
3582 for i in main.activeNodes:
3583 t = main.Thread( target=main.CLIs[i].setTestSize,
3584 name="setTestSize-" + str( i ),
3585 args=[ onosSetName ] )
3586 threads.append( t )
3587 t.start()
3588 for t in threads:
3589 t.join()
3590 sizeResponses.append( t.result )
3591 sizeResults = main.TRUE
3592 for i in range( len( main.activeNodes ) ):
3593 node = str( main.activeNodes[i] + 1 )
3594 if size != sizeResponses[ i ]:
3595 sizeResults = main.FALSE
3596 main.log.error( "ONOS" + node +
3597 " expected a size of " + str( size ) +
3598 " for set " + onosSetName +
3599 " but got " + str( sizeResponses[ i ] ) )
3600 addAllResults = addAllResults and getResults and sizeResults
3601 utilities.assert_equals( expect=main.TRUE,
3602 actual=addAllResults,
3603 onpass="Set addAll correct",
3604 onfail="Set addAll was incorrect" )
3605
3606 main.step( "Distributed Set contains()" )
3607 containsResponses = []
3608 threads = []
3609 for i in main.activeNodes:
3610 t = main.Thread( target=main.CLIs[i].setTestGet,
3611 name="setContains-" + str( i ),
3612 args=[ onosSetName ],
3613 kwargs={ "values": addValue } )
3614 threads.append( t )
3615 t.start()
3616 for t in threads:
3617 t.join()
3618 # NOTE: This is the tuple
3619 containsResponses.append( t.result )
3620
3621 containsResults = main.TRUE
3622 for i in range( len( main.activeNodes ) ):
3623 if containsResponses[ i ] == main.ERROR:
3624 containsResults = main.FALSE
3625 else:
3626 containsResults = containsResults and\
3627 containsResponses[ i ][ 1 ]
3628 utilities.assert_equals( expect=main.TRUE,
3629 actual=containsResults,
3630 onpass="Set contains is functional",
3631 onfail="Set contains failed" )
3632
3633 main.step( "Distributed Set containsAll()" )
3634 containsAllResponses = []
3635 threads = []
3636 for i in main.activeNodes:
3637 t = main.Thread( target=main.CLIs[i].setTestGet,
3638 name="setContainsAll-" + str( i ),
3639 args=[ onosSetName ],
3640 kwargs={ "values": addAllValue } )
3641 threads.append( t )
3642 t.start()
3643 for t in threads:
3644 t.join()
3645 # NOTE: This is the tuple
3646 containsAllResponses.append( t.result )
3647
3648 containsAllResults = main.TRUE
3649 for i in range( len( main.activeNodes ) ):
3650 if containsResponses[ i ] == main.ERROR:
3651 containsResults = main.FALSE
3652 else:
3653 containsResults = containsResults and\
3654 containsResponses[ i ][ 1 ]
3655 utilities.assert_equals( expect=main.TRUE,
3656 actual=containsAllResults,
3657 onpass="Set containsAll is functional",
3658 onfail="Set containsAll failed" )
3659
        # --- Step: remove a single value from the distributed set ---
        # Mirror the removal on the local reference set ( onosSet ), issue
        # the remove on every active node in parallel, then verify each
        # node's contents and size against the reference set.
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        # Threads were started in main.activeNodes order, so t.result
        # entries line up index-for-index with main.activeNodes.
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )  # expected size, taken from the reference set
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify each node also reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the remove, contents and size checks all pass
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3756
        # --- Step: remove several values at once (removeAll) ---
        # Mirror the bulk removal on the reference set, issue setTestRemove
        # with the whole space-separated value string on every active node,
        # then verify contents and size as in the single-remove step.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:  # Python 2 except syntax; file targets py2
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )  # expected size, taken from the reference set
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify each node also reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the removeAll, contents and size checks all pass
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3856
        # --- Step: add several values at once (addAll) ---
        # Mirror the bulk add on the reference set, issue setTestAdd with
        # the whole space-separated value string on every active node,
        # then verify contents and size against the reference set.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )  # expected size, taken from the reference set
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify each node also reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the addAll, contents and size checks all pass
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3953
        # --- Step: clear the distributed set ---
        # Mirror the clear on the reference set, then issue setTestRemove
        # with the "clear" kwarg on every active node, and verify each
        # node now reports an empty set of size 0.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )  # expected size; 0 after the clear above
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify each node also reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the clear, contents and size checks all pass
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4051
        # --- Step: re-populate the set with addAll after the clear ---
        # Same procedure as the earlier addAll step: mirror the bulk add
        # on the reference set, add on every active node in parallel, then
        # verify contents and size.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )  # expected size, taken from the reference set
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify each node also reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the addAll, contents and size checks all pass
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4148
        # --- Step: retain (intersection) on the distributed set ---
        # Mirror the intersection on the reference set, then issue
        # setTestRemove with the "retain" kwarg on every active node so
        # only values in retainValue survive; verify contents and size.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )  # expected size, taken from the reference set
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify each node also reports the expected set size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the retain, contents and size checks all pass
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4245
4246 # Transactional maps
4247 main.step( "Partitioned Transactional maps put" )
4248 tMapValue = "Testing"
4249 numKeys = 100
4250 putResult = True
4251 node = main.activeNodes[0]
4252 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4253 if putResponses and len( putResponses ) == 100:
4254 for i in putResponses:
4255 if putResponses[ i ][ 'value' ] != tMapValue:
4256 putResult = False
4257 else:
4258 putResult = False
4259 if not putResult:
4260 main.log.debug( "Put response values: " + str( putResponses ) )
4261 utilities.assert_equals( expect=True,
4262 actual=putResult,
4263 onpass="Partitioned Transactional Map put successful",
4264 onfail="Partitioned Transactional Map put values are incorrect" )
4265
        # --- Step: read back every transactional-map key from all nodes ---
        # For each of the numKeys keys written above, fetch "Key<n>" from
        # every active node in parallel and require each returned value to
        # equal tMapValue.
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: 'node' here is each per-node response, shadowing the
            # node index set in the put step above.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )