blob: 04000ba7de4d1b5f3e5d025a27ec4d55d78bfea7 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    dynamic scaling of the cluster size.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The scaling case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
25
26
class HAscaling:
    """TestON test class: verify ONOS handles dynamic cluster-size scaling."""

    def __init__( self ):
        """Initialize per-instance defaults used by the TestON framework."""
        self.default = ''
31
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import time
        import os
        import re
        main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Clamp the requested controller count to what the bench can host
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # Import test-suite helpers; abort the whole test if they are missing
        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
            from tests.HA.HAscaling.dependencies.Server import Server
            main.Server = Server()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect CLI/node component handles injected by TestON
        # (ONOScli1..N, ONOS1..N); stop at the first missing component.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )

        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                 onpass="Set cell successfull",
                                 onfail="Failled to set cell" )

        main.step( "Verify connectivity to cell" )
        verifyResult = main.ONOSbench.verifyCell()
        utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
                                 onpass="Verify cell passed",
                                 onfail="Failled to verify cell" )

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        # Serve the cluster metadata file over HTTP so nodes can fetch it
        main.step( "Setup server for cluster metadata file" )
        # NOTE: port comes straight from the params file, so it is a string
        port = main.params['server']['port']
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=port,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )

        main.step( "Generate initial metadata file" )
        # 'scaling' is a comma separated list of cluster sizes; an "e" suffix
        # means the partitions should be generated with the "equal" option
        main.scaling = main.params['scaling'].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop(0)
        main.log.debug( scale)
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal)
        main.numCtrls = int( re.search( "\d+", scale ).group(0) )
        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAscaling"
        plotName = "Plot-HA"
        index = "0"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        # Keep a pristine copy of onos-service; it is modified below and
        # restored after install
        main.step( "Copying backup config files" )
        path = "~/onos/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 path,
                                 path + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params['server'].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Slashes are escaped because the URI is substituted via sed below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject JAVA_OPTS (with the metadata URI) right after the shebang line
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        handle.expect( "\$" )
        main.log.debug( repr( handle.before ) )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for i in range( main.ONOSbench.maxNodes ):
            node = main.nodes[i]
            options = "-f"
            if i >= main.numCtrls:
                options = "-nf"  # Don't start more than the current scale
            tmpResult = main.ONOSbench.onosInstall( options=options,
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            path + ".backup",
                            path,
                            direction="to" )

        main.step( "Checking if ONOS is up yet" )
        # Retry the whole isup sweep up to twice
        # NOTE(review): the inner loop reuses the outer loop variable `i`;
        # harmless here since the outer value is not read again, but fragile
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for i in range( main.numCtrls ):
                node = main.nodes[i]
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        # Start one CLI session per node in parallel
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump the non-ACTIVE components of each node before aborting
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        # Verify app-to-id mappings are consistent on every active node
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
395
    def CASE2( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch (s1-s28) at the full set of controller
        IPs and verifies each switch lists every ONOS node as a controller.
        """
        import re
        # Sanity-check state that CASE1 is expected to have populated
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.ONOSbench.maxNodes ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        # Confirm every switch points at every controller IP
        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
441
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins each of the 28 devices to a designated ONOS node via
        'device-role', then re-reads the roles to confirm the assignment took.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluser, but will work with any sized
            # cluster
            # ('c % main.numCtrls' wraps the intended 7-node target onto the
            # actual cluster size)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
556
    def CASE3( self, main ):
        """
        Assign intents

        Discovers hosts via reactive-forwarding pingall, installs
        host-to-host intents (h8-h17 paired with h18-h27), then verifies the
        intents reach INSTALLED state and disperse to every active node
        within the anti-entropy window.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # The plotting globals are normally set in CASE1; fall back to empty
        # lists if this case runs standalone
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        # NOTE: we must reinstall intents until we have a persistant intent
        #       datastore!
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        # Host MACs in this topology encode the host number in hex
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Round-robin the intent installation across active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to 100s) until every active node reports the same set of
        # intent IDs, all in INSTALLED state
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is exported for the Jenkins plot data
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        # Record the measurement under a unique "Gossip IntentsN" label
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in labels:
                labels.append( curTitle )
                data.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # If intents were not all found, wait and re-dump the cluster state
        # for debugging
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
975
976 def CASE4( self, main ):
977 """
978 Ping across added host intents
979 """
980 import json
981 import time
982 assert main.numCtrls, "main.numCtrls not defined"
983 assert main, "main not defined"
984 assert utilities.assert_equals, "utilities.assert_equals not defined"
985 assert main.CLIs, "main.CLIs not defined"
986 assert main.nodes, "main.nodes not defined"
987 main.case( "Verify connectivity by sending traffic across Intents" )
988 main.caseExplanation = "Ping across added host intents to check " +\
989 "functionality and check the state of " +\
990 "the intent"
991
992 onosCli = main.CLIs[ main.activeNodes[0] ]
993 main.step( "Check Intent state" )
994 installedCheck = False
995 loopCount = 0
996 while not installedCheck and loopCount < 40:
997 installedCheck = True
998 # Print the intent states
999 intents = onosCli.intents()
1000 intentStates = []
1001 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1002 count = 0
1003 # Iter through intents of a node
1004 try:
1005 for intent in json.loads( intents ):
1006 state = intent.get( 'state', None )
1007 if "INSTALLED" not in state:
1008 installedCheck = False
1009 intentId = intent.get( 'id', None )
1010 intentStates.append( ( intentId, state ) )
1011 except ( ValueError, TypeError ):
1012 main.log.exception( "Error parsing intents." )
1013 # Print states
1014 intentStates.sort()
1015 for i, s in intentStates:
1016 count += 1
1017 main.log.info( "%-6s%-15s%-15s" %
1018 ( str( count ), str( i ), str( s ) ) )
1019 if not installedCheck:
1020 time.sleep( 1 )
1021 loopCount += 1
1022 utilities.assert_equals( expect=True, actual=installedCheck,
1023 onpass="Intents are all INSTALLED",
1024 onfail="Intents are not all in " +
1025 "INSTALLED state" )
1026
1027 main.step( "Ping across added host intents" )
1028 PingResult = main.TRUE
1029 for i in range( 8, 18 ):
1030 ping = main.Mininet1.pingHost( src="h" + str( i ),
1031 target="h" + str( i + 10 ) )
1032 PingResult = PingResult and ping
1033 if ping == main.FALSE:
1034 main.log.warn( "Ping failed between h" + str( i ) +
1035 " and h" + str( i + 10 ) )
1036 elif ping == main.TRUE:
1037 main.log.info( "Ping test passed!" )
1038 # Don't set PingResult or you'd override failures
1039 if PingResult == main.FALSE:
1040 main.log.error(
1041 "Intents have not been installed correctly, pings failed." )
1042 # TODO: pretty print
1043 main.log.warn( "ONOS1 intents: " )
1044 try:
1045 tmpIntents = onosCli.intents()
1046 main.log.warn( json.dumps( json.loads( tmpIntents ),
1047 sort_keys=True,
1048 indent=4,
1049 separators=( ',', ': ' ) ) )
1050 except ( ValueError, TypeError ):
1051 main.log.warn( repr( tmpIntents ) )
1052 utilities.assert_equals(
1053 expect=main.TRUE,
1054 actual=PingResult,
1055 onpass="Intents have been installed correctly and pings work",
1056 onfail="Intents have not been installed correctly, pings failed." )
1057
1058 main.step( "Check leadership of topics" )
1059 leaders = onosCli.leaders()
1060 topicCheck = main.TRUE
1061 try:
1062 if leaders:
1063 parsedLeaders = json.loads( leaders )
1064 main.log.warn( json.dumps( parsedLeaders,
1065 sort_keys=True,
1066 indent=4,
1067 separators=( ',', ': ' ) ) )
1068 # check for all intent partitions
1069 # check for election
1070 # TODO: Look at Devices as topics now that it uses this system
1071 topics = []
1072 for i in range( 14 ):
1073 topics.append( "intent-partition-" + str( i ) )
1074 # FIXME: this should only be after we start the app
1075 # FIXME: topics.append( "org.onosproject.election" )
1076 # Print leaders output
1077 main.log.debug( topics )
1078 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1079 for topic in topics:
1080 if topic not in ONOStopics:
1081 main.log.error( "Error: " + topic +
1082 " not in leaders" )
1083 topicCheck = main.FALSE
1084 else:
1085 main.log.error( "leaders() returned None" )
1086 topicCheck = main.FALSE
1087 except ( ValueError, TypeError ):
1088 topicCheck = main.FALSE
1089 main.log.exception( "Error parsing leaders" )
1090 main.log.error( repr( leaders ) )
1091 # TODO: Check for a leader of these topics
1092 # Check all nodes
1093 if topicCheck:
1094 for i in main.activeNodes:
1095 node = main.CLIs[i]
1096 response = node.leaders( jsonFormat=False)
1097 main.log.warn( str( node.name ) + " leaders output: \n" +
1098 str( response ) )
1099
1100 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1101 onpass="intent Partitions is in leaders",
1102 onfail="Some topics were lost " )
1103 # Print partitions
1104 partitions = onosCli.partitions()
1105 try:
1106 if partitions :
1107 parsedPartitions = json.loads( partitions )
1108 main.log.warn( json.dumps( parsedPartitions,
1109 sort_keys=True,
1110 indent=4,
1111 separators=( ',', ': ' ) ) )
1112 # TODO check for a leader in all paritions
1113 # TODO check for consistency among nodes
1114 else:
1115 main.log.error( "partitions() returned None" )
1116 except ( ValueError, TypeError ):
1117 main.log.exception( "Error parsing partitions" )
1118 main.log.error( repr( partitions ) )
1119 # Print Pending Map
1120 pendingMap = onosCli.pendingMap()
1121 try:
1122 if pendingMap :
1123 parsedPending = json.loads( pendingMap )
1124 main.log.warn( json.dumps( parsedPending,
1125 sort_keys=True,
1126 indent=4,
1127 separators=( ',', ': ' ) ) )
1128 # TODO check something here?
1129 else:
1130 main.log.error( "pendingMap() returned None" )
1131 except ( ValueError, TypeError ):
1132 main.log.exception( "Error parsing pending map" )
1133 main.log.error( repr( pendingMap ) )
1134
1135 if not installedCheck:
1136 main.log.info( "Waiting 60 seconds to see if the state of " +
1137 "intents change" )
1138 time.sleep( 60 )
1139 # Print the intent states
1140 intents = onosCli.intents()
1141 intentStates = []
1142 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1143 count = 0
1144 # Iter through intents of a node
1145 try:
1146 for intent in json.loads( intents ):
1147 state = intent.get( 'state', None )
1148 if "INSTALLED" not in state:
1149 installedCheck = False
1150 intentId = intent.get( 'id', None )
1151 intentStates.append( ( intentId, state ) )
1152 except ( ValueError, TypeError ):
1153 main.log.exception( "Error parsing intents." )
1154 intentStates.sort()
1155 for i, s in intentStates:
1156 count += 1
1157 main.log.info( "%-6s%-15s%-15s" %
1158 ( str( count ), str( i ), str( s ) ) )
1159 leaders = onosCli.leaders()
1160 try:
1161 missing = False
1162 if leaders:
1163 parsedLeaders = json.loads( leaders )
1164 main.log.warn( json.dumps( parsedLeaders,
1165 sort_keys=True,
1166 indent=4,
1167 separators=( ',', ': ' ) ) )
1168 # check for all intent partitions
1169 # check for election
1170 topics = []
1171 for i in range( 14 ):
1172 topics.append( "intent-partition-" + str( i ) )
1173 # FIXME: this should only be after we start the app
1174 topics.append( "org.onosproject.election" )
1175 main.log.debug( topics )
1176 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1177 for topic in topics:
1178 if topic not in ONOStopics:
1179 main.log.error( "Error: " + topic +
1180 " not in leaders" )
1181 missing = True
1182 else:
1183 main.log.error( "leaders() returned None" )
1184 except ( ValueError, TypeError ):
1185 main.log.exception( "Error parsing leaders" )
1186 main.log.error( repr( leaders ) )
1187 if missing:
1188 for i in main.activeNodes:
1189 node = main.CLIs[i]
1190 response = node.leaders( jsonFormat=False)
1191 main.log.warn( str( node.name ) + " leaders output: \n" +
1192 str( response ) )
1193
1194 partitions = onosCli.partitions()
1195 try:
1196 if partitions :
1197 parsedPartitions = json.loads( partitions )
1198 main.log.warn( json.dumps( parsedPartitions,
1199 sort_keys=True,
1200 indent=4,
1201 separators=( ',', ': ' ) ) )
1202 # TODO check for a leader in all paritions
1203 # TODO check for consistency among nodes
1204 else:
1205 main.log.error( "partitions() returned None" )
1206 except ( ValueError, TypeError ):
1207 main.log.exception( "Error parsing partitions" )
1208 main.log.error( repr( partitions ) )
1209 pendingMap = onosCli.pendingMap()
1210 try:
1211 if pendingMap :
1212 parsedPending = json.loads( pendingMap )
1213 main.log.warn( json.dumps( parsedPending,
1214 sort_keys=True,
1215 indent=4,
1216 separators=( ',', ': ' ) ) )
1217 # TODO check something here?
1218 else:
1219 main.log.error( "pendingMap() returned None" )
1220 except ( ValueError, TypeError ):
1221 main.log.exception( "Error parsing pending map" )
1222 main.log.error( repr( pendingMap ) )
1223 # Print flowrules
1224 node = main.activeNodes[0]
1225 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
1226 main.step( "Wait a minute then ping again" )
1227 # the wait is above
1228 PingResult = main.TRUE
1229 for i in range( 8, 18 ):
1230 ping = main.Mininet1.pingHost( src="h" + str( i ),
1231 target="h" + str( i + 10 ) )
1232 PingResult = PingResult and ping
1233 if ping == main.FALSE:
1234 main.log.warn( "Ping failed between h" + str( i ) +
1235 " and h" + str( i + 10 ) )
1236 elif ping == main.TRUE:
1237 main.log.info( "Ping test passed!" )
1238 # Don't set PingResult or you'd override failures
1239 if PingResult == main.FALSE:
1240 main.log.error(
1241 "Intents have not been installed correctly, pings failed." )
1242 # TODO: pretty print
1243 main.log.warn( "ONOS1 intents: " )
1244 try:
1245 tmpIntents = onosCli.intents()
1246 main.log.warn( json.dumps( json.loads( tmpIntents ),
1247 sort_keys=True,
1248 indent=4,
1249 separators=( ',', ': ' ) ) )
1250 except ( ValueError, TypeError ):
1251 main.log.warn( repr( tmpIntents ) )
1252 utilities.assert_equals(
1253 expect=main.TRUE,
1254 actual=PingResult,
1255 onpass="Intents have been installed correctly and pings work",
1256 onfail="Intents have not been installed correctly, pings failed." )
1257
1258 def CASE5( self, main ):
1259 """
1260 Reading state of ONOS
1261 """
1262 import json
1263 import time
1264 assert main.numCtrls, "main.numCtrls not defined"
1265 assert main, "main not defined"
1266 assert utilities.assert_equals, "utilities.assert_equals not defined"
1267 assert main.CLIs, "main.CLIs not defined"
1268 assert main.nodes, "main.nodes not defined"
1269
1270 main.case( "Setting up and gathering data for current state" )
1271 # The general idea for this test case is to pull the state of
1272 # ( intents,flows, topology,... ) from each ONOS node
1273 # We can then compare them with each other and also with past states
1274
1275 main.step( "Check that each switch has a master" )
1276 global mastershipState
1277 mastershipState = '[]'
1278
1279 # Assert that each device has a master
1280 rolesNotNull = main.TRUE
1281 threads = []
1282 for i in main.activeNodes:
1283 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1284 name="rolesNotNull-" + str( i ),
1285 args=[] )
1286 threads.append( t )
1287 t.start()
1288
1289 for t in threads:
1290 t.join()
1291 rolesNotNull = rolesNotNull and t.result
1292 utilities.assert_equals(
1293 expect=main.TRUE,
1294 actual=rolesNotNull,
1295 onpass="Each device has a master",
1296 onfail="Some devices don't have a master assigned" )
1297
1298 main.step( "Get the Mastership of each switch from each controller" )
1299 ONOSMastership = []
1300 consistentMastership = True
1301 rolesResults = True
1302 threads = []
1303 for i in main.activeNodes:
1304 t = main.Thread( target=main.CLIs[i].roles,
1305 name="roles-" + str( i ),
1306 args=[] )
1307 threads.append( t )
1308 t.start()
1309
1310 for t in threads:
1311 t.join()
1312 ONOSMastership.append( t.result )
1313
1314 for i in range( len( ONOSMastership ) ):
1315 node = str( main.activeNodes[i] + 1 )
1316 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1317 main.log.error( "Error in getting ONOS" + node + " roles" )
1318 main.log.warn( "ONOS" + node + " mastership response: " +
1319 repr( ONOSMastership[i] ) )
1320 rolesResults = False
1321 utilities.assert_equals(
1322 expect=True,
1323 actual=rolesResults,
1324 onpass="No error in reading roles output",
1325 onfail="Error in reading roles from ONOS" )
1326
1327 main.step( "Check for consistency in roles from each controller" )
1328 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1329 main.log.info(
1330 "Switch roles are consistent across all ONOS nodes" )
1331 else:
1332 consistentMastership = False
1333 utilities.assert_equals(
1334 expect=True,
1335 actual=consistentMastership,
1336 onpass="Switch roles are consistent across all ONOS nodes",
1337 onfail="ONOS nodes have different views of switch roles" )
1338
1339 if rolesResults and not consistentMastership:
1340 for i in range( len( main.activeNodes ) ):
1341 node = str( main.activeNodes[i] + 1 )
1342 try:
1343 main.log.warn(
1344 "ONOS" + node + " roles: ",
1345 json.dumps(
1346 json.loads( ONOSMastership[ i ] ),
1347 sort_keys=True,
1348 indent=4,
1349 separators=( ',', ': ' ) ) )
1350 except ( ValueError, TypeError ):
1351 main.log.warn( repr( ONOSMastership[ i ] ) )
1352 elif rolesResults and consistentMastership:
1353 mastershipState = ONOSMastership[ 0 ]
1354
1355 main.step( "Get the intents from each controller" )
1356 global intentState
1357 intentState = []
1358 ONOSIntents = []
1359 consistentIntents = True # Are Intents consistent across nodes?
1360 intentsResults = True # Could we read Intents from ONOS?
1361 threads = []
1362 for i in main.activeNodes:
1363 t = main.Thread( target=main.CLIs[i].intents,
1364 name="intents-" + str( i ),
1365 args=[],
1366 kwargs={ 'jsonFormat': True } )
1367 threads.append( t )
1368 t.start()
1369
1370 for t in threads:
1371 t.join()
1372 ONOSIntents.append( t.result )
1373
1374 for i in range( len( ONOSIntents ) ):
1375 node = str( main.activeNodes[i] + 1 )
1376 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1377 main.log.error( "Error in getting ONOS" + node + " intents" )
1378 main.log.warn( "ONOS" + node + " intents response: " +
1379 repr( ONOSIntents[ i ] ) )
1380 intentsResults = False
1381 utilities.assert_equals(
1382 expect=True,
1383 actual=intentsResults,
1384 onpass="No error in reading intents output",
1385 onfail="Error in reading intents from ONOS" )
1386
1387 main.step( "Check for consistency in Intents from each controller" )
1388 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1389 main.log.info( "Intents are consistent across all ONOS " +
1390 "nodes" )
1391 else:
1392 consistentIntents = False
1393 main.log.error( "Intents not consistent" )
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=consistentIntents,
1397 onpass="Intents are consistent across all ONOS nodes",
1398 onfail="ONOS nodes have different views of intents" )
1399
1400 if intentsResults:
1401 # Try to make it easy to figure out what is happening
1402 #
1403 # Intent ONOS1 ONOS2 ...
1404 # 0x01 INSTALLED INSTALLING
1405 # ... ... ...
1406 # ... ... ...
1407 title = " Id"
1408 for n in main.activeNodes:
1409 title += " " * 10 + "ONOS" + str( n + 1 )
1410 main.log.warn( title )
1411 # get all intent keys in the cluster
1412 keys = []
1413 try:
1414 # Get the set of all intent keys
1415 for nodeStr in ONOSIntents:
1416 node = json.loads( nodeStr )
1417 for intent in node:
1418 keys.append( intent.get( 'id' ) )
1419 keys = set( keys )
1420 # For each intent key, print the state on each node
1421 for key in keys:
1422 row = "%-13s" % key
1423 for nodeStr in ONOSIntents:
1424 node = json.loads( nodeStr )
1425 for intent in node:
1426 if intent.get( 'id', "Error" ) == key:
1427 row += "%-15s" % intent.get( 'state' )
1428 main.log.warn( row )
1429 # End of intent state table
1430 except ValueError as e:
1431 main.log.exception( e )
1432 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1433
1434 if intentsResults and not consistentIntents:
1435 # print the json objects
1436 n = str( main.activeNodes[-1] + 1 )
1437 main.log.debug( "ONOS" + n + " intents: " )
1438 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1439 sort_keys=True,
1440 indent=4,
1441 separators=( ',', ': ' ) ) )
1442 for i in range( len( ONOSIntents ) ):
1443 node = str( main.activeNodes[i] + 1 )
1444 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1445 main.log.debug( "ONOS" + node + " intents: " )
1446 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1447 sort_keys=True,
1448 indent=4,
1449 separators=( ',', ': ' ) ) )
1450 else:
1451 main.log.debug( "ONOS" + node + " intents match ONOS" +
1452 n + " intents" )
1453 elif intentsResults and consistentIntents:
1454 intentState = ONOSIntents[ 0 ]
1455
1456 main.step( "Get the flows from each controller" )
1457 global flowState
1458 flowState = []
1459 ONOSFlows = []
1460 ONOSFlowsJson = []
1461 flowCheck = main.FALSE
1462 consistentFlows = True
1463 flowsResults = True
1464 threads = []
1465 for i in main.activeNodes:
1466 t = main.Thread( target=main.CLIs[i].flows,
1467 name="flows-" + str( i ),
1468 args=[],
1469 kwargs={ 'jsonFormat': True } )
1470 threads.append( t )
1471 t.start()
1472
1473 # NOTE: Flows command can take some time to run
1474 time.sleep(30)
1475 for t in threads:
1476 t.join()
1477 result = t.result
1478 ONOSFlows.append( result )
1479
1480 for i in range( len( ONOSFlows ) ):
1481 num = str( main.activeNodes[i] + 1 )
1482 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1483 main.log.error( "Error in getting ONOS" + num + " flows" )
1484 main.log.warn( "ONOS" + num + " flows response: " +
1485 repr( ONOSFlows[ i ] ) )
1486 flowsResults = False
1487 ONOSFlowsJson.append( None )
1488 else:
1489 try:
1490 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1491 except ( ValueError, TypeError ):
1492 # FIXME: change this to log.error?
1493 main.log.exception( "Error in parsing ONOS" + num +
1494 " response as json." )
1495 main.log.error( repr( ONOSFlows[ i ] ) )
1496 ONOSFlowsJson.append( None )
1497 flowsResults = False
1498 utilities.assert_equals(
1499 expect=True,
1500 actual=flowsResults,
1501 onpass="No error in reading flows output",
1502 onfail="Error in reading flows from ONOS" )
1503
1504 main.step( "Check for consistency in Flows from each controller" )
1505 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1506 if all( tmp ):
1507 main.log.info( "Flow count is consistent across all ONOS nodes" )
1508 else:
1509 consistentFlows = False
1510 utilities.assert_equals(
1511 expect=True,
1512 actual=consistentFlows,
1513 onpass="The flow count is consistent across all ONOS nodes",
1514 onfail="ONOS nodes have different flow counts" )
1515
1516 if flowsResults and not consistentFlows:
1517 for i in range( len( ONOSFlows ) ):
1518 node = str( main.activeNodes[i] + 1 )
1519 try:
1520 main.log.warn(
1521 "ONOS" + node + " flows: " +
1522 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1523 indent=4, separators=( ',', ': ' ) ) )
1524 except ( ValueError, TypeError ):
1525 main.log.warn( "ONOS" + node + " flows: " +
1526 repr( ONOSFlows[ i ] ) )
1527 elif flowsResults and consistentFlows:
1528 flowCheck = main.TRUE
1529 flowState = ONOSFlows[ 0 ]
1530
1531 main.step( "Get the OF Table entries" )
1532 global flows
1533 flows = []
1534 for i in range( 1, 29 ):
1535 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1536 if flowCheck == main.FALSE:
1537 for table in flows:
1538 main.log.warn( table )
1539 # TODO: Compare switch flow tables with ONOS flow tables
1540
1541 main.step( "Start continuous pings" )
1542 main.Mininet2.pingLong(
1543 src=main.params[ 'PING' ][ 'source1' ],
1544 target=main.params[ 'PING' ][ 'target1' ],
1545 pingTime=500 )
1546 main.Mininet2.pingLong(
1547 src=main.params[ 'PING' ][ 'source2' ],
1548 target=main.params[ 'PING' ][ 'target2' ],
1549 pingTime=500 )
1550 main.Mininet2.pingLong(
1551 src=main.params[ 'PING' ][ 'source3' ],
1552 target=main.params[ 'PING' ][ 'target3' ],
1553 pingTime=500 )
1554 main.Mininet2.pingLong(
1555 src=main.params[ 'PING' ][ 'source4' ],
1556 target=main.params[ 'PING' ][ 'target4' ],
1557 pingTime=500 )
1558 main.Mininet2.pingLong(
1559 src=main.params[ 'PING' ][ 'source5' ],
1560 target=main.params[ 'PING' ][ 'target5' ],
1561 pingTime=500 )
1562 main.Mininet2.pingLong(
1563 src=main.params[ 'PING' ][ 'source6' ],
1564 target=main.params[ 'PING' ][ 'target6' ],
1565 pingTime=500 )
1566 main.Mininet2.pingLong(
1567 src=main.params[ 'PING' ][ 'source7' ],
1568 target=main.params[ 'PING' ][ 'target7' ],
1569 pingTime=500 )
1570 main.Mininet2.pingLong(
1571 src=main.params[ 'PING' ][ 'source8' ],
1572 target=main.params[ 'PING' ][ 'target8' ],
1573 pingTime=500 )
1574 main.Mininet2.pingLong(
1575 src=main.params[ 'PING' ][ 'source9' ],
1576 target=main.params[ 'PING' ][ 'target9' ],
1577 pingTime=500 )
1578 main.Mininet2.pingLong(
1579 src=main.params[ 'PING' ][ 'source10' ],
1580 target=main.params[ 'PING' ][ 'target10' ],
1581 pingTime=500 )
1582
1583 main.step( "Collecting topology information from ONOS" )
1584 devices = []
1585 threads = []
1586 for i in main.activeNodes:
1587 t = main.Thread( target=main.CLIs[i].devices,
1588 name="devices-" + str( i ),
1589 args=[ ] )
1590 threads.append( t )
1591 t.start()
1592
1593 for t in threads:
1594 t.join()
1595 devices.append( t.result )
1596 hosts = []
1597 threads = []
1598 for i in main.activeNodes:
1599 t = main.Thread( target=main.CLIs[i].hosts,
1600 name="hosts-" + str( i ),
1601 args=[ ] )
1602 threads.append( t )
1603 t.start()
1604
1605 for t in threads:
1606 t.join()
1607 try:
1608 hosts.append( json.loads( t.result ) )
1609 except ( ValueError, TypeError ):
1610 # FIXME: better handling of this, print which node
1611 # Maybe use thread name?
1612 main.log.exception( "Error parsing json output of hosts" )
1613 main.log.warn( repr( t.result ) )
1614 hosts.append( None )
1615
1616 ports = []
1617 threads = []
1618 for i in main.activeNodes:
1619 t = main.Thread( target=main.CLIs[i].ports,
1620 name="ports-" + str( i ),
1621 args=[ ] )
1622 threads.append( t )
1623 t.start()
1624
1625 for t in threads:
1626 t.join()
1627 ports.append( t.result )
1628 links = []
1629 threads = []
1630 for i in main.activeNodes:
1631 t = main.Thread( target=main.CLIs[i].links,
1632 name="links-" + str( i ),
1633 args=[ ] )
1634 threads.append( t )
1635 t.start()
1636
1637 for t in threads:
1638 t.join()
1639 links.append( t.result )
1640 clusters = []
1641 threads = []
1642 for i in main.activeNodes:
1643 t = main.Thread( target=main.CLIs[i].clusters,
1644 name="clusters-" + str( i ),
1645 args=[ ] )
1646 threads.append( t )
1647 t.start()
1648
1649 for t in threads:
1650 t.join()
1651 clusters.append( t.result )
1652 # Compare json objects for hosts and dataplane clusters
1653
1654 # hosts
1655 main.step( "Host view is consistent across ONOS nodes" )
1656 consistentHostsResult = main.TRUE
1657 for controller in range( len( hosts ) ):
1658 controllerStr = str( main.activeNodes[controller] + 1 )
1659 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1660 if hosts[ controller ] == hosts[ 0 ]:
1661 continue
1662 else: # hosts not consistent
1663 main.log.error( "hosts from ONOS" +
1664 controllerStr +
1665 " is inconsistent with ONOS1" )
1666 main.log.warn( repr( hosts[ controller ] ) )
1667 consistentHostsResult = main.FALSE
1668
1669 else:
1670 main.log.error( "Error in getting ONOS hosts from ONOS" +
1671 controllerStr )
1672 consistentHostsResult = main.FALSE
1673 main.log.warn( "ONOS" + controllerStr +
1674 " hosts response: " +
1675 repr( hosts[ controller ] ) )
1676 utilities.assert_equals(
1677 expect=main.TRUE,
1678 actual=consistentHostsResult,
1679 onpass="Hosts view is consistent across all ONOS nodes",
1680 onfail="ONOS nodes have different views of hosts" )
1681
1682 main.step( "Each host has an IP address" )
1683 ipResult = main.TRUE
1684 for controller in range( 0, len( hosts ) ):
1685 controllerStr = str( main.activeNodes[controller] + 1 )
1686 if hosts[ controller ]:
1687 for host in hosts[ controller ]:
1688 if not host.get( 'ipAddresses', [ ] ):
1689 main.log.error( "Error with host ips on controller" +
1690 controllerStr + ": " + str( host ) )
1691 ipResult = main.FALSE
1692 utilities.assert_equals(
1693 expect=main.TRUE,
1694 actual=ipResult,
1695 onpass="The ips of the hosts aren't empty",
1696 onfail="The ip of at least one host is missing" )
1697
1698 # Strongly connected clusters of devices
1699 main.step( "Cluster view is consistent across ONOS nodes" )
1700 consistentClustersResult = main.TRUE
1701 for controller in range( len( clusters ) ):
1702 controllerStr = str( main.activeNodes[controller] + 1 )
1703 if "Error" not in clusters[ controller ]:
1704 if clusters[ controller ] == clusters[ 0 ]:
1705 continue
1706 else: # clusters not consistent
1707 main.log.error( "clusters from ONOS" + controllerStr +
1708 " is inconsistent with ONOS1" )
1709 consistentClustersResult = main.FALSE
1710
1711 else:
1712 main.log.error( "Error in getting dataplane clusters " +
1713 "from ONOS" + controllerStr )
1714 consistentClustersResult = main.FALSE
1715 main.log.warn( "ONOS" + controllerStr +
1716 " clusters response: " +
1717 repr( clusters[ controller ] ) )
1718 utilities.assert_equals(
1719 expect=main.TRUE,
1720 actual=consistentClustersResult,
1721 onpass="Clusters view is consistent across all ONOS nodes",
1722 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001723 if not consistentClustersResult:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001724 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001725
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001726 # there should always only be one cluster
1727 main.step( "Cluster view correct across ONOS nodes" )
1728 try:
1729 numClusters = len( json.loads( clusters[ 0 ] ) )
1730 except ( ValueError, TypeError ):
1731 main.log.exception( "Error parsing clusters[0]: " +
1732 repr( clusters[ 0 ] ) )
1733 numClusters = "ERROR"
1734 utilities.assert_equals(
1735 expect=1,
1736 actual=numClusters,
1737 onpass="ONOS shows 1 SCC",
1738 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1739
1740 main.step( "Comparing ONOS topology to MN" )
1741 devicesResults = main.TRUE
1742 linksResults = main.TRUE
1743 hostsResults = main.TRUE
1744 mnSwitches = main.Mininet1.getSwitches()
1745 mnLinks = main.Mininet1.getLinks()
1746 mnHosts = main.Mininet1.getHosts()
1747 for controller in main.activeNodes:
1748 controllerStr = str( main.activeNodes[controller] + 1 )
1749 if devices[ controller ] and ports[ controller ] and\
1750 "Error" not in devices[ controller ] and\
1751 "Error" not in ports[ controller ]:
1752 currentDevicesResult = main.Mininet1.compareSwitches(
1753 mnSwitches,
1754 json.loads( devices[ controller ] ),
1755 json.loads( ports[ controller ] ) )
1756 else:
1757 currentDevicesResult = main.FALSE
1758 utilities.assert_equals( expect=main.TRUE,
1759 actual=currentDevicesResult,
1760 onpass="ONOS" + controllerStr +
1761 " Switches view is correct",
1762 onfail="ONOS" + controllerStr +
1763 " Switches view is incorrect" )
1764 if links[ controller ] and "Error" not in links[ controller ]:
1765 currentLinksResult = main.Mininet1.compareLinks(
1766 mnSwitches, mnLinks,
1767 json.loads( links[ controller ] ) )
1768 else:
1769 currentLinksResult = main.FALSE
1770 utilities.assert_equals( expect=main.TRUE,
1771 actual=currentLinksResult,
1772 onpass="ONOS" + controllerStr +
1773 " links view is correct",
1774 onfail="ONOS" + controllerStr +
1775 " links view is incorrect" )
1776
1777 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1778 currentHostsResult = main.Mininet1.compareHosts(
1779 mnHosts,
1780 hosts[ controller ] )
1781 else:
1782 currentHostsResult = main.FALSE
1783 utilities.assert_equals( expect=main.TRUE,
1784 actual=currentHostsResult,
1785 onpass="ONOS" + controllerStr +
1786 " hosts exist in Mininet",
1787 onfail="ONOS" + controllerStr +
1788 " hosts don't match Mininet" )
1789
1790 devicesResults = devicesResults and currentDevicesResult
1791 linksResults = linksResults and currentLinksResult
1792 hostsResults = hostsResults and currentHostsResult
1793
1794 main.step( "Device information is correct" )
1795 utilities.assert_equals(
1796 expect=main.TRUE,
1797 actual=devicesResults,
1798 onpass="Device information is correct",
1799 onfail="Device information is incorrect" )
1800
1801 main.step( "Links are correct" )
1802 utilities.assert_equals(
1803 expect=main.TRUE,
1804 actual=linksResults,
1805 onpass="Link are correct",
1806 onfail="Links are incorrect" )
1807
1808 main.step( "Hosts are correct" )
1809 utilities.assert_equals(
1810 expect=main.TRUE,
1811 actual=hostsResults,
1812 onpass="Hosts are correct",
1813 onfail="Hosts are incorrect" )
1814
1815 def CASE6( self, main ):
1816 """
1817 The Scaling case.
1818 """
1819 import time
1820 import re
1821 assert main.numCtrls, "main.numCtrls not defined"
1822 assert main, "main not defined"
1823 assert utilities.assert_equals, "utilities.assert_equals not defined"
1824 assert main.CLIs, "main.CLIs not defined"
1825 assert main.nodes, "main.nodes not defined"
1826 try:
1827 labels
1828 except NameError:
1829 main.log.error( "labels not defined, setting to []" )
1830 global labels
1831 labels = []
1832 try:
1833 data
1834 except NameError:
1835 main.log.error( "data not defined, setting to []" )
1836 global data
1837 data = []
1838
Jon Hall69b2b982016-05-11 12:04:59 -07001839 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001840
1841 main.step( "Checking ONOS Logs for errors" )
1842 for i in main.activeNodes:
1843 node = main.nodes[i]
1844 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1845 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1846
1847 """
1848 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1849 modify cluster.json file appropriately
1850 install/deactivate node as needed
1851 """
1852
1853 try:
1854 prevNodes = main.activeNodes
1855 scale = main.scaling.pop(0)
1856 if "e" in scale:
1857 equal = True
1858 else:
1859 equal = False
1860 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1861 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1862 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1863 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1864 onpass="New cluster metadata file generated",
1865 onfail="Failled to generate new metadata file" )
1866 time.sleep( 5 ) # Give time for nodes to read new file
1867 except IndexError:
1868 main.cleanup()
1869 main.exit()
1870
1871 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1872 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1873
1874 main.step( "Start new nodes" ) # OR stop old nodes?
1875 started = main.TRUE
1876 for i in newNodes:
1877 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1878 utilities.assert_equals( expect=main.TRUE, actual=started,
1879 onpass="ONOS started",
1880 onfail="ONOS start NOT successful" )
1881
1882 main.step( "Checking if ONOS is up yet" )
1883 for i in range( 2 ):
1884 onosIsupResult = main.TRUE
1885 for i in main.activeNodes:
1886 node = main.nodes[i]
1887 started = main.ONOSbench.isup( node.ip_address )
1888 if not started:
1889 main.log.error( node.name + " didn't start!" )
1890 onosIsupResult = onosIsupResult and started
1891 if onosIsupResult == main.TRUE:
1892 break
1893 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1894 onpass="ONOS started",
1895 onfail="ONOS start NOT successful" )
1896
Jon Hall6509dbf2016-06-21 17:01:17 -07001897 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001898 cliResults = main.TRUE
1899 threads = []
1900 for i in main.activeNodes:
1901 t = main.Thread( target=main.CLIs[i].startOnosCli,
1902 name="startOnosCli-" + str( i ),
1903 args=[main.nodes[i].ip_address] )
1904 threads.append( t )
1905 t.start()
1906
1907 for t in threads:
1908 t.join()
1909 cliResults = cliResults and t.result
1910 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1911 onpass="ONOS cli started",
1912 onfail="ONOS clis did not start" )
1913
1914 main.step( "Checking ONOS nodes" )
1915 nodeResults = utilities.retry( main.HA.nodesCheck,
1916 False,
1917 args=[main.activeNodes],
1918 attempts=5 )
1919 utilities.assert_equals( expect=True, actual=nodeResults,
1920 onpass="Nodes check successful",
1921 onfail="Nodes check NOT successful" )
1922
1923 for i in range( 10 ):
1924 ready = True
1925 for i in main.activeNodes:
1926 cli = main.CLIs[i]
1927 output = cli.summary()
1928 if not output:
1929 ready = False
1930 if ready:
1931 break
1932 time.sleep( 30 )
1933 utilities.assert_equals( expect=True, actual=ready,
1934 onpass="ONOS summary command succeded",
1935 onfail="ONOS summary command failed" )
1936 if not ready:
1937 main.cleanup()
1938 main.exit()
1939
1940 # Rerun for election on new nodes
1941 runResults = main.TRUE
1942 for i in main.activeNodes:
1943 cli = main.CLIs[i]
1944 run = cli.electionTestRun()
1945 if run != main.TRUE:
1946 main.log.error( "Error running for election on " + cli.name )
1947 runResults = runResults and run
1948 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1949 onpass="Reran for election",
1950 onfail="Failed to rerun for election" )
1951
1952 # TODO: Make this configurable
1953 time.sleep( 60 )
1954 for node in main.activeNodes:
1955 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1956 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1959 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1960
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that after the cluster was resized in CASE6:
          - every switch still has a master
          - device roles are readable and consistent across nodes
          - intents are readable, consistent across nodes, and unchanged
            from the state saved before scaling
          - switch flow tables are unchanged
          - leadership election still works and all nodes agree on a leader

        NOTE(review): relies on globals saved by earlier cases
        ( intentState, flows ) — see the steps below.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Any empty/error response marks the whole read as failed
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Compare every node's raw roles output against node 0's
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one column per node's state
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # NOTE(review): intentState is a global saved by an earlier case;
        # the NameError guard below handles the case where it was never set.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # NOTE(review): 'flows' is a global presumably saved by an earlier
        # case (per-switch flow tables before scaling) — confirm it is set
        # before this case runs.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # All nodes must report the same leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2269
    def CASE8( self, main ):
        """
        Compare topo.

        Repeatedly pulls devices/hosts/ports/links/clusters from every
        active ONOS node (for up to ~60 seconds or 3 attempts) and
        compares the views against the Mininet topology, then checks
        cross-node consistency of hosts and clusters, that there is a
        single SCC, and finally node health.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every node in parallel, retrying each
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Fetch and parse hosts from every node
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host must have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Fetch ports from every node
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Fetch links from every node
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Fetch SCC clusters from every node
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node, retry the whole pull
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    # NOTE(review): if this raises, currentDevicesResult is
                    # left unbound (first iteration) or stale — the except
                    # branch should probably set it to main.FALSE.
                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        # NOTE(review): if numClusters is neither 1 nor "ERROR",
        # clusterResults is never assigned and the topoResult expression
        # below would raise NameError.
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within 2 polling attempts
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2688
2689 def CASE9( self, main ):
2690 """
2691 Link s3-s28 down
2692 """
2693 import time
2694 assert main.numCtrls, "main.numCtrls not defined"
2695 assert main, "main not defined"
2696 assert utilities.assert_equals, "utilities.assert_equals not defined"
2697 assert main.CLIs, "main.CLIs not defined"
2698 assert main.nodes, "main.nodes not defined"
2699 # NOTE: You should probably run a topology check after this
2700
2701 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2702
2703 description = "Turn off a link to ensure that Link Discovery " +\
2704 "is working properly"
2705 main.case( description )
2706
2707 main.step( "Kill Link between s3 and s28" )
2708 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2709 main.log.info( "Waiting " + str( linkSleep ) +
2710 " seconds for link down to be discovered" )
2711 time.sleep( linkSleep )
2712 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2713 onpass="Link down successful",
2714 onfail="Failed to bring link down" )
2715 # TODO do some sort of check here
2716
2717 def CASE10( self, main ):
2718 """
2719 Link s3-s28 up
2720 """
2721 import time
2722 assert main.numCtrls, "main.numCtrls not defined"
2723 assert main, "main not defined"
2724 assert utilities.assert_equals, "utilities.assert_equals not defined"
2725 assert main.CLIs, "main.CLIs not defined"
2726 assert main.nodes, "main.nodes not defined"
2727 # NOTE: You should probably run a topology check after this
2728
2729 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2730
2731 description = "Restore a link to ensure that Link Discovery is " + \
2732 "working properly"
2733 main.case( description )
2734
2735 main.step( "Bring link between s3 and s28 back up" )
2736 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2737 main.log.info( "Waiting " + str( linkSleep ) +
2738 " seconds for link up to be discovered" )
2739 time.sleep( linkSleep )
2740 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2741 onpass="Link up successful",
2742 onfail="Failed to bring link up" )
2743 # TODO do some sort of check here
2744
2745 def CASE11( self, main ):
2746 """
2747 Switch Down
2748 """
2749 # NOTE: You should probably run a topology check after this
2750 import time
2751 assert main.numCtrls, "main.numCtrls not defined"
2752 assert main, "main not defined"
2753 assert utilities.assert_equals, "utilities.assert_equals not defined"
2754 assert main.CLIs, "main.CLIs not defined"
2755 assert main.nodes, "main.nodes not defined"
2756
2757 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2758
2759 description = "Killing a switch to ensure it is discovered correctly"
2760 onosCli = main.CLIs[ main.activeNodes[0] ]
2761 main.case( description )
2762 switch = main.params[ 'kill' ][ 'switch' ]
2763 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2764
2765 # TODO: Make this switch parameterizable
2766 main.step( "Kill " + switch )
2767 main.log.info( "Deleting " + switch )
2768 main.Mininet1.delSwitch( switch )
2769 main.log.info( "Waiting " + str( switchSleep ) +
2770 " seconds for switch down to be discovered" )
2771 time.sleep( switchSleep )
2772 device = onosCli.getDevice( dpid=switchDPID )
2773 # Peek at the deleted switch
2774 main.log.warn( str( device ) )
2775 result = main.FALSE
2776 if device and device[ 'available' ] is False:
2777 result = main.TRUE
2778 utilities.assert_equals( expect=main.TRUE, actual=result,
2779 onpass="Kill switch successful",
2780 onfail="Failed to kill switch?" )
2781
2782 def CASE12( self, main ):
2783 """
2784 Switch Up
2785 """
2786 # NOTE: You should probably run a topology check after this
2787 import time
2788 assert main.numCtrls, "main.numCtrls not defined"
2789 assert main, "main not defined"
2790 assert utilities.assert_equals, "utilities.assert_equals not defined"
2791 assert main.CLIs, "main.CLIs not defined"
2792 assert main.nodes, "main.nodes not defined"
2793
2794 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2795 switch = main.params[ 'kill' ][ 'switch' ]
2796 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2797 links = main.params[ 'kill' ][ 'links' ].split()
2798 onosCli = main.CLIs[ main.activeNodes[0] ]
2799 description = "Adding a switch to ensure it is discovered correctly"
2800 main.case( description )
2801
2802 main.step( "Add back " + switch )
2803 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2804 for peer in links:
2805 main.Mininet1.addLink( switch, peer )
2806 ipList = [ node.ip_address for node in main.nodes ]
2807 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2808 main.log.info( "Waiting " + str( switchSleep ) +
2809 " seconds for switch up to be discovered" )
2810 time.sleep( switchSleep )
2811 device = onosCli.getDevice( dpid=switchDPID )
2812 # Peek at the deleted switch
2813 main.log.warn( str( device ) )
2814 result = main.FALSE
2815 if device and device[ 'available' ]:
2816 result = main.TRUE
2817 utilities.assert_equals( expect=main.TRUE, actual=result,
2818 onpass="add switch successful",
2819 onfail="Failed to add switch?" )
2820
2821 def CASE13( self, main ):
2822 """
2823 Clean up
2824 """
2825 assert main.numCtrls, "main.numCtrls not defined"
2826 assert main, "main not defined"
2827 assert utilities.assert_equals, "utilities.assert_equals not defined"
2828 assert main.CLIs, "main.CLIs not defined"
2829 assert main.nodes, "main.nodes not defined"
2830
2831 main.case( "Test Cleanup" )
2832 main.step( "Killing tcpdumps" )
2833 main.Mininet2.stopTcpdump()
2834
2835 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2836 main.step( "Copying MN pcap and ONOS log files to test station" )
2837 # NOTE: MN Pcap file is being saved to logdir.
2838 # We scp this file as MN and TestON aren't necessarily the same vm
2839
2840 # FIXME: To be replaced with a Jenkin's post script
2841 # TODO: Load these from params
2842 # NOTE: must end in /
2843 logFolder = "/opt/onos/log/"
2844 logFiles = [ "karaf.log", "karaf.log.1" ]
2845 # NOTE: must end in /
2846 for f in logFiles:
2847 for node in main.nodes:
2848 dstName = main.logdir + "/" + node.name + "-" + f
2849 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2850 logFolder + f, dstName )
2851 # std*.log's
2852 # NOTE: must end in /
2853 logFolder = "/opt/onos/var/"
2854 logFiles = [ "stderr.log", "stdout.log" ]
2855 # NOTE: must end in /
2856 for f in logFiles:
2857 for node in main.nodes:
2858 dstName = main.logdir + "/" + node.name + "-" + f
2859 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2860 logFolder + f, dstName )
2861 else:
2862 main.log.debug( "skipping saving log files" )
2863
2864 main.step( "Stopping Mininet" )
2865 mnResult = main.Mininet1.stopNet()
2866 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2867 onpass="Mininet stopped",
2868 onfail="MN cleanup NOT successful" )
2869
2870 main.step( "Checking ONOS Logs for errors" )
2871 for node in main.nodes:
2872 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2873 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2874
2875 try:
2876 timerLog = open( main.logdir + "/Timers.csv", 'w')
2877 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2878 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2879 timerLog.close()
2880 except NameError, e:
2881 main.log.exception(e)
2882
2883 main.step( "Stopping webserver" )
2884 status = main.Server.stop( )
2885 utilities.assert_equals( expect=main.TRUE, actual=status,
2886 onpass="Stop Server",
2887 onfail="Failled to stop SimpleHTTPServer" )
2888 del main.Server
2889
2890 def CASE14( self, main ):
2891 """
2892 start election app on all onos nodes
2893 """
2894 import time
2895 assert main.numCtrls, "main.numCtrls not defined"
2896 assert main, "main not defined"
2897 assert utilities.assert_equals, "utilities.assert_equals not defined"
2898 assert main.CLIs, "main.CLIs not defined"
2899 assert main.nodes, "main.nodes not defined"
2900
2901 main.case("Start Leadership Election app")
2902 main.step( "Install leadership election app" )
2903 onosCli = main.CLIs[ main.activeNodes[0] ]
2904 appResult = onosCli.activateApp( "org.onosproject.election" )
2905 utilities.assert_equals(
2906 expect=main.TRUE,
2907 actual=appResult,
2908 onpass="Election app installed",
2909 onfail="Something went wrong with installing Leadership election" )
2910
2911 main.step( "Run for election on each node" )
2912 for i in main.activeNodes:
2913 main.CLIs[i].electionTestRun()
2914 time.sleep(5)
2915 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2916 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2917 utilities.assert_equals(
2918 expect=True,
2919 actual=sameResult,
2920 onpass="All nodes see the same leaderboards",
2921 onfail="Inconsistent leaderboards" )
2922
2923 if sameResult:
2924 leader = leaders[ 0 ][ 0 ]
2925 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2926 correctLeader = True
2927 else:
2928 correctLeader = False
2929 main.step( "First node was elected leader" )
2930 utilities.assert_equals(
2931 expect=True,
2932 actual=correctLeader,
2933 onpass="Correct leader was elected",
2934 onfail="Incorrect leader" )
2935
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a canidate is not persistant

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leaders fron newLoeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        # With a single controller, withdrawing the leader leaves no
        # candidates, so no new leader can be elected
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # The rest of the case depends on the election app working; bail out
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # leaderboards agree; first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means the topic has no leader; only acceptable when
            # expectNoLeader is set (single-controller cluster)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawl
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # fewer than 3 entries on the old board: cannot tell who should win
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Paremterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3109
3110 def CASE16( self, main ):
3111 """
3112 Install Distributed Primitives app
3113 """
3114 import time
3115 assert main.numCtrls, "main.numCtrls not defined"
3116 assert main, "main not defined"
3117 assert utilities.assert_equals, "utilities.assert_equals not defined"
3118 assert main.CLIs, "main.CLIs not defined"
3119 assert main.nodes, "main.nodes not defined"
3120
3121 # Variables for the distributed primitives tests
3122 global pCounterName
3123 global pCounterValue
3124 global onosSet
3125 global onosSetName
3126 pCounterName = "TestON-Partitions"
3127 pCounterValue = 0
3128 onosSet = set([])
3129 onosSetName = "TestON-set"
3130
3131 description = "Install Primitives app"
3132 main.case( description )
3133 main.step( "Install Primitives app" )
3134 appName = "org.onosproject.distributedprimitives"
3135 node = main.activeNodes[0]
3136 appResults = main.CLIs[node].activateApp( appName )
3137 utilities.assert_equals( expect=main.TRUE,
3138 actual=appResults,
3139 onpass="Primitives app activated",
3140 onfail="Primitives app not activated" )
3141 time.sleep( 5 ) # To allow all nodes to activate
3142
3143 def CASE17( self, main ):
3144 """
3145 Check for basic functionality with distributed primitives
3146 """
3147 # Make sure variables are defined/set
3148 assert main.numCtrls, "main.numCtrls not defined"
3149 assert main, "main not defined"
3150 assert utilities.assert_equals, "utilities.assert_equals not defined"
3151 assert main.CLIs, "main.CLIs not defined"
3152 assert main.nodes, "main.nodes not defined"
3153 assert pCounterName, "pCounterName not defined"
3154 assert onosSetName, "onosSetName not defined"
3155 # NOTE: assert fails if value is 0/None/Empty/False
3156 try:
3157 pCounterValue
3158 except NameError:
3159 main.log.error( "pCounterValue not defined, setting to 0" )
3160 pCounterValue = 0
3161 try:
3162 onosSet
3163 except NameError:
3164 main.log.error( "onosSet not defined, setting to empty Set" )
3165 onosSet = set([])
3166 # Variables for the distributed primitives tests. These are local only
3167 addValue = "a"
3168 addAllValue = "a b c d e f"
3169 retainValue = "c d e f"
3170
3171 description = "Check for basic functionality with distributed " +\
3172 "primitives"
3173 main.case( description )
3174 main.caseExplanation = "Test the methods of the distributed " +\
3175 "primitives (counters and sets) throught the cli"
3176 # DISTRIBUTED ATOMIC COUNTERS
3177 # Partitioned counters
3178 main.step( "Increment then get a default counter on each node" )
3179 pCounters = []
3180 threads = []
3181 addedPValues = []
3182 for i in main.activeNodes:
3183 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3184 name="counterAddAndGet-" + str( i ),
3185 args=[ pCounterName ] )
3186 pCounterValue += 1
3187 addedPValues.append( pCounterValue )
3188 threads.append( t )
3189 t.start()
3190
3191 for t in threads:
3192 t.join()
3193 pCounters.append( t.result )
3194 # Check that counter incremented numController times
3195 pCounterResults = True
3196 for i in addedPValues:
3197 tmpResult = i in pCounters
3198 pCounterResults = pCounterResults and tmpResult
3199 if not tmpResult:
3200 main.log.error( str( i ) + " is not in partitioned "
3201 "counter incremented results" )
3202 utilities.assert_equals( expect=True,
3203 actual=pCounterResults,
3204 onpass="Default counter incremented",
3205 onfail="Error incrementing default" +
3206 " counter" )
3207
3208 main.step( "Get then Increment a default counter on each node" )
3209 pCounters = []
3210 threads = []
3211 addedPValues = []
3212 for i in main.activeNodes:
3213 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3214 name="counterGetAndAdd-" + str( i ),
3215 args=[ pCounterName ] )
3216 addedPValues.append( pCounterValue )
3217 pCounterValue += 1
3218 threads.append( t )
3219 t.start()
3220
3221 for t in threads:
3222 t.join()
3223 pCounters.append( t.result )
3224 # Check that counter incremented numController times
3225 pCounterResults = True
3226 for i in addedPValues:
3227 tmpResult = i in pCounters
3228 pCounterResults = pCounterResults and tmpResult
3229 if not tmpResult:
3230 main.log.error( str( i ) + " is not in partitioned "
3231 "counter incremented results" )
3232 utilities.assert_equals( expect=True,
3233 actual=pCounterResults,
3234 onpass="Default counter incremented",
3235 onfail="Error incrementing default" +
3236 " counter" )
3237
3238 main.step( "Counters we added have the correct values" )
3239 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3240 utilities.assert_equals( expect=main.TRUE,
3241 actual=incrementCheck,
3242 onpass="Added counters are correct",
3243 onfail="Added counters are incorrect" )
3244
3245 main.step( "Add -8 to then get a default counter on each node" )
3246 pCounters = []
3247 threads = []
3248 addedPValues = []
3249 for i in main.activeNodes:
3250 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3251 name="counterIncrement-" + str( i ),
3252 args=[ pCounterName ],
3253 kwargs={ "delta": -8 } )
3254 pCounterValue += -8
3255 addedPValues.append( pCounterValue )
3256 threads.append( t )
3257 t.start()
3258
3259 for t in threads:
3260 t.join()
3261 pCounters.append( t.result )
3262 # Check that counter incremented numController times
3263 pCounterResults = True
3264 for i in addedPValues:
3265 tmpResult = i in pCounters
3266 pCounterResults = pCounterResults and tmpResult
3267 if not tmpResult:
3268 main.log.error( str( i ) + " is not in partitioned "
3269 "counter incremented results" )
3270 utilities.assert_equals( expect=True,
3271 actual=pCounterResults,
3272 onpass="Default counter incremented",
3273 onfail="Error incrementing default" +
3274 " counter" )
3275
3276 main.step( "Add 5 to then get a default counter on each node" )
3277 pCounters = []
3278 threads = []
3279 addedPValues = []
3280 for i in main.activeNodes:
3281 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3282 name="counterIncrement-" + str( i ),
3283 args=[ pCounterName ],
3284 kwargs={ "delta": 5 } )
3285 pCounterValue += 5
3286 addedPValues.append( pCounterValue )
3287 threads.append( t )
3288 t.start()
3289
3290 for t in threads:
3291 t.join()
3292 pCounters.append( t.result )
3293 # Check that counter incremented numController times
3294 pCounterResults = True
3295 for i in addedPValues:
3296 tmpResult = i in pCounters
3297 pCounterResults = pCounterResults and tmpResult
3298 if not tmpResult:
3299 main.log.error( str( i ) + " is not in partitioned "
3300 "counter incremented results" )
3301 utilities.assert_equals( expect=True,
3302 actual=pCounterResults,
3303 onpass="Default counter incremented",
3304 onfail="Error incrementing default" +
3305 " counter" )
3306
3307 main.step( "Get then add 5 to a default counter on each node" )
3308 pCounters = []
3309 threads = []
3310 addedPValues = []
3311 for i in main.activeNodes:
3312 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3313 name="counterIncrement-" + str( i ),
3314 args=[ pCounterName ],
3315 kwargs={ "delta": 5 } )
3316 addedPValues.append( pCounterValue )
3317 pCounterValue += 5
3318 threads.append( t )
3319 t.start()
3320
3321 for t in threads:
3322 t.join()
3323 pCounters.append( t.result )
3324 # Check that counter incremented numController times
3325 pCounterResults = True
3326 for i in addedPValues:
3327 tmpResult = i in pCounters
3328 pCounterResults = pCounterResults and tmpResult
3329 if not tmpResult:
3330 main.log.error( str( i ) + " is not in partitioned "
3331 "counter incremented results" )
3332 utilities.assert_equals( expect=True,
3333 actual=pCounterResults,
3334 onpass="Default counter incremented",
3335 onfail="Error incrementing default" +
3336 " counter" )
3337
3338 main.step( "Counters we added have the correct values" )
3339 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3340 utilities.assert_equals( expect=main.TRUE,
3341 actual=incrementCheck,
3342 onpass="Added counters are correct",
3343 onfail="Added counters are incorrect" )
3344
3345 # DISTRIBUTED SETS
3346 main.step( "Distributed Set get" )
3347 size = len( onosSet )
3348 getResponses = []
3349 threads = []
3350 for i in main.activeNodes:
3351 t = main.Thread( target=main.CLIs[i].setTestGet,
3352 name="setTestGet-" + str( i ),
3353 args=[ onosSetName ] )
3354 threads.append( t )
3355 t.start()
3356 for t in threads:
3357 t.join()
3358 getResponses.append( t.result )
3359
3360 getResults = main.TRUE
3361 for i in range( len( main.activeNodes ) ):
3362 node = str( main.activeNodes[i] + 1 )
3363 if isinstance( getResponses[ i ], list):
3364 current = set( getResponses[ i ] )
3365 if len( current ) == len( getResponses[ i ] ):
3366 # no repeats
3367 if onosSet != current:
3368 main.log.error( "ONOS" + node +
3369 " has incorrect view" +
3370 " of set " + onosSetName + ":\n" +
3371 str( getResponses[ i ] ) )
3372 main.log.debug( "Expected: " + str( onosSet ) )
3373 main.log.debug( "Actual: " + str( current ) )
3374 getResults = main.FALSE
3375 else:
3376 # error, set is not a set
3377 main.log.error( "ONOS" + node +
3378 " has repeat elements in" +
3379 " set " + onosSetName + ":\n" +
3380 str( getResponses[ i ] ) )
3381 getResults = main.FALSE
3382 elif getResponses[ i ] == main.ERROR:
3383 getResults = main.FALSE
3384 utilities.assert_equals( expect=main.TRUE,
3385 actual=getResults,
3386 onpass="Set elements are correct",
3387 onfail="Set elements are incorrect" )
3388
3389 main.step( "Distributed Set size" )
3390 sizeResponses = []
3391 threads = []
3392 for i in main.activeNodes:
3393 t = main.Thread( target=main.CLIs[i].setTestSize,
3394 name="setTestSize-" + str( i ),
3395 args=[ onosSetName ] )
3396 threads.append( t )
3397 t.start()
3398 for t in threads:
3399 t.join()
3400 sizeResponses.append( t.result )
3401
3402 sizeResults = main.TRUE
3403 for i in range( len( main.activeNodes ) ):
3404 node = str( main.activeNodes[i] + 1 )
3405 if size != sizeResponses[ i ]:
3406 sizeResults = main.FALSE
3407 main.log.error( "ONOS" + node +
3408 " expected a size of " + str( size ) +
3409 " for set " + onosSetName +
3410 " but got " + str( sizeResponses[ i ] ) )
3411 utilities.assert_equals( expect=main.TRUE,
3412 actual=sizeResults,
3413 onpass="Set sizes are correct",
3414 onfail="Set sizes are incorrect" )
3415
3416 main.step( "Distributed Set add()" )
3417 onosSet.add( addValue )
3418 addResponses = []
3419 threads = []
3420 for i in main.activeNodes:
3421 t = main.Thread( target=main.CLIs[i].setTestAdd,
3422 name="setTestAdd-" + str( i ),
3423 args=[ onosSetName, addValue ] )
3424 threads.append( t )
3425 t.start()
3426 for t in threads:
3427 t.join()
3428 addResponses.append( t.result )
3429
3430 # main.TRUE = successfully changed the set
3431 # main.FALSE = action resulted in no change in set
3432 # main.ERROR - Some error in executing the function
3433 addResults = main.TRUE
3434 for i in range( len( main.activeNodes ) ):
3435 if addResponses[ i ] == main.TRUE:
3436 # All is well
3437 pass
3438 elif addResponses[ i ] == main.FALSE:
3439 # Already in set, probably fine
3440 pass
3441 elif addResponses[ i ] == main.ERROR:
3442 # Error in execution
3443 addResults = main.FALSE
3444 else:
3445 # unexpected result
3446 addResults = main.FALSE
3447 if addResults != main.TRUE:
3448 main.log.error( "Error executing set add" )
3449
3450 # Check if set is still correct
3451 size = len( onosSet )
3452 getResponses = []
3453 threads = []
3454 for i in main.activeNodes:
3455 t = main.Thread( target=main.CLIs[i].setTestGet,
3456 name="setTestGet-" + str( i ),
3457 args=[ onosSetName ] )
3458 threads.append( t )
3459 t.start()
3460 for t in threads:
3461 t.join()
3462 getResponses.append( t.result )
3463 getResults = main.TRUE
3464 for i in range( len( main.activeNodes ) ):
3465 node = str( main.activeNodes[i] + 1 )
3466 if isinstance( getResponses[ i ], list):
3467 current = set( getResponses[ i ] )
3468 if len( current ) == len( getResponses[ i ] ):
3469 # no repeats
3470 if onosSet != current:
3471 main.log.error( "ONOS" + node + " has incorrect view" +
3472 " of set " + onosSetName + ":\n" +
3473 str( getResponses[ i ] ) )
3474 main.log.debug( "Expected: " + str( onosSet ) )
3475 main.log.debug( "Actual: " + str( current ) )
3476 getResults = main.FALSE
3477 else:
3478 # error, set is not a set
3479 main.log.error( "ONOS" + node + " has repeat elements in" +
3480 " set " + onosSetName + ":\n" +
3481 str( getResponses[ i ] ) )
3482 getResults = main.FALSE
3483 elif getResponses[ i ] == main.ERROR:
3484 getResults = main.FALSE
3485 sizeResponses = []
3486 threads = []
3487 for i in main.activeNodes:
3488 t = main.Thread( target=main.CLIs[i].setTestSize,
3489 name="setTestSize-" + str( i ),
3490 args=[ onosSetName ] )
3491 threads.append( t )
3492 t.start()
3493 for t in threads:
3494 t.join()
3495 sizeResponses.append( t.result )
3496 sizeResults = main.TRUE
3497 for i in range( len( main.activeNodes ) ):
3498 node = str( main.activeNodes[i] + 1 )
3499 if size != sizeResponses[ i ]:
3500 sizeResults = main.FALSE
3501 main.log.error( "ONOS" + node +
3502 " expected a size of " + str( size ) +
3503 " for set " + onosSetName +
3504 " but got " + str( sizeResponses[ i ] ) )
3505 addResults = addResults and getResults and sizeResults
3506 utilities.assert_equals( expect=main.TRUE,
3507 actual=addResults,
3508 onpass="Set add correct",
3509 onfail="Set add was incorrect" )
3510
3511 main.step( "Distributed Set addAll()" )
3512 onosSet.update( addAllValue.split() )
3513 addResponses = []
3514 threads = []
3515 for i in main.activeNodes:
3516 t = main.Thread( target=main.CLIs[i].setTestAdd,
3517 name="setTestAddAll-" + str( i ),
3518 args=[ onosSetName, addAllValue ] )
3519 threads.append( t )
3520 t.start()
3521 for t in threads:
3522 t.join()
3523 addResponses.append( t.result )
3524
3525 # main.TRUE = successfully changed the set
3526 # main.FALSE = action resulted in no change in set
3527 # main.ERROR - Some error in executing the function
3528 addAllResults = main.TRUE
3529 for i in range( len( main.activeNodes ) ):
3530 if addResponses[ i ] == main.TRUE:
3531 # All is well
3532 pass
3533 elif addResponses[ i ] == main.FALSE:
3534 # Already in set, probably fine
3535 pass
3536 elif addResponses[ i ] == main.ERROR:
3537 # Error in execution
3538 addAllResults = main.FALSE
3539 else:
3540 # unexpected result
3541 addAllResults = main.FALSE
3542 if addAllResults != main.TRUE:
3543 main.log.error( "Error executing set addAll" )
3544
3545 # Check if set is still correct
3546 size = len( onosSet )
3547 getResponses = []
3548 threads = []
3549 for i in main.activeNodes:
3550 t = main.Thread( target=main.CLIs[i].setTestGet,
3551 name="setTestGet-" + str( i ),
3552 args=[ onosSetName ] )
3553 threads.append( t )
3554 t.start()
3555 for t in threads:
3556 t.join()
3557 getResponses.append( t.result )
3558 getResults = main.TRUE
3559 for i in range( len( main.activeNodes ) ):
3560 node = str( main.activeNodes[i] + 1 )
3561 if isinstance( getResponses[ i ], list):
3562 current = set( getResponses[ i ] )
3563 if len( current ) == len( getResponses[ i ] ):
3564 # no repeats
3565 if onosSet != current:
3566 main.log.error( "ONOS" + node +
3567 " has incorrect view" +
3568 " of set " + onosSetName + ":\n" +
3569 str( getResponses[ i ] ) )
3570 main.log.debug( "Expected: " + str( onosSet ) )
3571 main.log.debug( "Actual: " + str( current ) )
3572 getResults = main.FALSE
3573 else:
3574 # error, set is not a set
3575 main.log.error( "ONOS" + node +
3576 " has repeat elements in" +
3577 " set " + onosSetName + ":\n" +
3578 str( getResponses[ i ] ) )
3579 getResults = main.FALSE
3580 elif getResponses[ i ] == main.ERROR:
3581 getResults = main.FALSE
3582 sizeResponses = []
3583 threads = []
3584 for i in main.activeNodes:
3585 t = main.Thread( target=main.CLIs[i].setTestSize,
3586 name="setTestSize-" + str( i ),
3587 args=[ onosSetName ] )
3588 threads.append( t )
3589 t.start()
3590 for t in threads:
3591 t.join()
3592 sizeResponses.append( t.result )
3593 sizeResults = main.TRUE
3594 for i in range( len( main.activeNodes ) ):
3595 node = str( main.activeNodes[i] + 1 )
3596 if size != sizeResponses[ i ]:
3597 sizeResults = main.FALSE
3598 main.log.error( "ONOS" + node +
3599 " expected a size of " + str( size ) +
3600 " for set " + onosSetName +
3601 " but got " + str( sizeResponses[ i ] ) )
3602 addAllResults = addAllResults and getResults and sizeResults
3603 utilities.assert_equals( expect=main.TRUE,
3604 actual=addAllResults,
3605 onpass="Set addAll correct",
3606 onfail="Set addAll was incorrect" )
3607
3608 main.step( "Distributed Set contains()" )
3609 containsResponses = []
3610 threads = []
3611 for i in main.activeNodes:
3612 t = main.Thread( target=main.CLIs[i].setTestGet,
3613 name="setContains-" + str( i ),
3614 args=[ onosSetName ],
3615 kwargs={ "values": addValue } )
3616 threads.append( t )
3617 t.start()
3618 for t in threads:
3619 t.join()
3620 # NOTE: This is the tuple
3621 containsResponses.append( t.result )
3622
3623 containsResults = main.TRUE
3624 for i in range( len( main.activeNodes ) ):
3625 if containsResponses[ i ] == main.ERROR:
3626 containsResults = main.FALSE
3627 else:
3628 containsResults = containsResults and\
3629 containsResponses[ i ][ 1 ]
3630 utilities.assert_equals( expect=main.TRUE,
3631 actual=containsResults,
3632 onpass="Set contains is functional",
3633 onfail="Set contains failed" )
3634
3635 main.step( "Distributed Set containsAll()" )
3636 containsAllResponses = []
3637 threads = []
3638 for i in main.activeNodes:
3639 t = main.Thread( target=main.CLIs[i].setTestGet,
3640 name="setContainsAll-" + str( i ),
3641 args=[ onosSetName ],
3642 kwargs={ "values": addAllValue } )
3643 threads.append( t )
3644 t.start()
3645 for t in threads:
3646 t.join()
3647 # NOTE: This is the tuple
3648 containsAllResponses.append( t.result )
3649
3650 containsAllResults = main.TRUE
3651 for i in range( len( main.activeNodes ) ):
3652 if containsResponses[ i ] == main.ERROR:
3653 containsResults = main.FALSE
3654 else:
3655 containsResults = containsResults and\
3656 containsResponses[ i ][ 1 ]
3657 utilities.assert_equals( expect=main.TRUE,
3658 actual=containsAllResults,
3659 onpass="Set containsAll is functional",
3660 onfail="Set containsAll failed" )
3661
3662 main.step( "Distributed Set remove()" )
3663 onosSet.remove( addValue )
3664 removeResponses = []
3665 threads = []
3666 for i in main.activeNodes:
3667 t = main.Thread( target=main.CLIs[i].setTestRemove,
3668 name="setTestRemove-" + str( i ),
3669 args=[ onosSetName, addValue ] )
3670 threads.append( t )
3671 t.start()
3672 for t in threads:
3673 t.join()
3674 removeResponses.append( t.result )
3675
3676 # main.TRUE = successfully changed the set
3677 # main.FALSE = action resulted in no change in set
3678 # main.ERROR - Some error in executing the function
3679 removeResults = main.TRUE
3680 for i in range( len( main.activeNodes ) ):
3681 if removeResponses[ i ] == main.TRUE:
3682 # All is well
3683 pass
3684 elif removeResponses[ i ] == main.FALSE:
3685 # not in set, probably fine
3686 pass
3687 elif removeResponses[ i ] == main.ERROR:
3688 # Error in execution
3689 removeResults = main.FALSE
3690 else:
3691 # unexpected result
3692 removeResults = main.FALSE
3693 if removeResults != main.TRUE:
3694 main.log.error( "Error executing set remove" )
3695
3696 # Check if set is still correct
3697 size = len( onosSet )
3698 getResponses = []
3699 threads = []
3700 for i in main.activeNodes:
3701 t = main.Thread( target=main.CLIs[i].setTestGet,
3702 name="setTestGet-" + str( i ),
3703 args=[ onosSetName ] )
3704 threads.append( t )
3705 t.start()
3706 for t in threads:
3707 t.join()
3708 getResponses.append( t.result )
3709 getResults = main.TRUE
3710 for i in range( len( main.activeNodes ) ):
3711 node = str( main.activeNodes[i] + 1 )
3712 if isinstance( getResponses[ i ], list):
3713 current = set( getResponses[ i ] )
3714 if len( current ) == len( getResponses[ i ] ):
3715 # no repeats
3716 if onosSet != current:
3717 main.log.error( "ONOS" + node +
3718 " has incorrect view" +
3719 " of set " + onosSetName + ":\n" +
3720 str( getResponses[ i ] ) )
3721 main.log.debug( "Expected: " + str( onosSet ) )
3722 main.log.debug( "Actual: " + str( current ) )
3723 getResults = main.FALSE
3724 else:
3725 # error, set is not a set
3726 main.log.error( "ONOS" + node +
3727 " has repeat elements in" +
3728 " set " + onosSetName + ":\n" +
3729 str( getResponses[ i ] ) )
3730 getResults = main.FALSE
3731 elif getResponses[ i ] == main.ERROR:
3732 getResults = main.FALSE
3733 sizeResponses = []
3734 threads = []
3735 for i in main.activeNodes:
3736 t = main.Thread( target=main.CLIs[i].setTestSize,
3737 name="setTestSize-" + str( i ),
3738 args=[ onosSetName ] )
3739 threads.append( t )
3740 t.start()
3741 for t in threads:
3742 t.join()
3743 sizeResponses.append( t.result )
3744 sizeResults = main.TRUE
3745 for i in range( len( main.activeNodes ) ):
3746 node = str( main.activeNodes[i] + 1 )
3747 if size != sizeResponses[ i ]:
3748 sizeResults = main.FALSE
3749 main.log.error( "ONOS" + node +
3750 " expected a size of " + str( size ) +
3751 " for set " + onosSetName +
3752 " but got " + str( sizeResponses[ i ] ) )
3753 removeResults = removeResults and getResults and sizeResults
3754 utilities.assert_equals( expect=main.TRUE,
3755 actual=removeResults,
3756 onpass="Set remove correct",
3757 onfail="Set remove was incorrect" )
3758
3759 main.step( "Distributed Set removeAll()" )
3760 onosSet.difference_update( addAllValue.split() )
3761 removeAllResponses = []
3762 threads = []
3763 try:
3764 for i in main.activeNodes:
3765 t = main.Thread( target=main.CLIs[i].setTestRemove,
3766 name="setTestRemoveAll-" + str( i ),
3767 args=[ onosSetName, addAllValue ] )
3768 threads.append( t )
3769 t.start()
3770 for t in threads:
3771 t.join()
3772 removeAllResponses.append( t.result )
3773 except Exception, e:
3774 main.log.exception(e)
3775
3776 # main.TRUE = successfully changed the set
3777 # main.FALSE = action resulted in no change in set
3778 # main.ERROR - Some error in executing the function
3779 removeAllResults = main.TRUE
3780 for i in range( len( main.activeNodes ) ):
3781 if removeAllResponses[ i ] == main.TRUE:
3782 # All is well
3783 pass
3784 elif removeAllResponses[ i ] == main.FALSE:
3785 # not in set, probably fine
3786 pass
3787 elif removeAllResponses[ i ] == main.ERROR:
3788 # Error in execution
3789 removeAllResults = main.FALSE
3790 else:
3791 # unexpected result
3792 removeAllResults = main.FALSE
3793 if removeAllResults != main.TRUE:
3794 main.log.error( "Error executing set removeAll" )
3795
3796 # Check if set is still correct
3797 size = len( onosSet )
3798 getResponses = []
3799 threads = []
3800 for i in main.activeNodes:
3801 t = main.Thread( target=main.CLIs[i].setTestGet,
3802 name="setTestGet-" + str( i ),
3803 args=[ onosSetName ] )
3804 threads.append( t )
3805 t.start()
3806 for t in threads:
3807 t.join()
3808 getResponses.append( t.result )
3809 getResults = main.TRUE
3810 for i in range( len( main.activeNodes ) ):
3811 node = str( main.activeNodes[i] + 1 )
3812 if isinstance( getResponses[ i ], list):
3813 current = set( getResponses[ i ] )
3814 if len( current ) == len( getResponses[ i ] ):
3815 # no repeats
3816 if onosSet != current:
3817 main.log.error( "ONOS" + node +
3818 " has incorrect view" +
3819 " of set " + onosSetName + ":\n" +
3820 str( getResponses[ i ] ) )
3821 main.log.debug( "Expected: " + str( onosSet ) )
3822 main.log.debug( "Actual: " + str( current ) )
3823 getResults = main.FALSE
3824 else:
3825 # error, set is not a set
3826 main.log.error( "ONOS" + node +
3827 " has repeat elements in" +
3828 " set " + onosSetName + ":\n" +
3829 str( getResponses[ i ] ) )
3830 getResults = main.FALSE
3831 elif getResponses[ i ] == main.ERROR:
3832 getResults = main.FALSE
3833 sizeResponses = []
3834 threads = []
3835 for i in main.activeNodes:
3836 t = main.Thread( target=main.CLIs[i].setTestSize,
3837 name="setTestSize-" + str( i ),
3838 args=[ onosSetName ] )
3839 threads.append( t )
3840 t.start()
3841 for t in threads:
3842 t.join()
3843 sizeResponses.append( t.result )
3844 sizeResults = main.TRUE
3845 for i in range( len( main.activeNodes ) ):
3846 node = str( main.activeNodes[i] + 1 )
3847 if size != sizeResponses[ i ]:
3848 sizeResults = main.FALSE
3849 main.log.error( "ONOS" + node +
3850 " expected a size of " + str( size ) +
3851 " for set " + onosSetName +
3852 " but got " + str( sizeResponses[ i ] ) )
3853 removeAllResults = removeAllResults and getResults and sizeResults
3854 utilities.assert_equals( expect=main.TRUE,
3855 actual=removeAllResults,
3856 onpass="Set removeAll correct",
3857 onfail="Set removeAll was incorrect" )
3858
3859 main.step( "Distributed Set addAll()" )
3860 onosSet.update( addAllValue.split() )
3861 addResponses = []
3862 threads = []
3863 for i in main.activeNodes:
3864 t = main.Thread( target=main.CLIs[i].setTestAdd,
3865 name="setTestAddAll-" + str( i ),
3866 args=[ onosSetName, addAllValue ] )
3867 threads.append( t )
3868 t.start()
3869 for t in threads:
3870 t.join()
3871 addResponses.append( t.result )
3872
3873 # main.TRUE = successfully changed the set
3874 # main.FALSE = action resulted in no change in set
3875 # main.ERROR - Some error in executing the function
3876 addAllResults = main.TRUE
3877 for i in range( len( main.activeNodes ) ):
3878 if addResponses[ i ] == main.TRUE:
3879 # All is well
3880 pass
3881 elif addResponses[ i ] == main.FALSE:
3882 # Already in set, probably fine
3883 pass
3884 elif addResponses[ i ] == main.ERROR:
3885 # Error in execution
3886 addAllResults = main.FALSE
3887 else:
3888 # unexpected result
3889 addAllResults = main.FALSE
3890 if addAllResults != main.TRUE:
3891 main.log.error( "Error executing set addAll" )
3892
3893 # Check if set is still correct
3894 size = len( onosSet )
3895 getResponses = []
3896 threads = []
3897 for i in main.activeNodes:
3898 t = main.Thread( target=main.CLIs[i].setTestGet,
3899 name="setTestGet-" + str( i ),
3900 args=[ onosSetName ] )
3901 threads.append( t )
3902 t.start()
3903 for t in threads:
3904 t.join()
3905 getResponses.append( t.result )
3906 getResults = main.TRUE
3907 for i in range( len( main.activeNodes ) ):
3908 node = str( main.activeNodes[i] + 1 )
3909 if isinstance( getResponses[ i ], list):
3910 current = set( getResponses[ i ] )
3911 if len( current ) == len( getResponses[ i ] ):
3912 # no repeats
3913 if onosSet != current:
3914 main.log.error( "ONOS" + node +
3915 " has incorrect view" +
3916 " of set " + onosSetName + ":\n" +
3917 str( getResponses[ i ] ) )
3918 main.log.debug( "Expected: " + str( onosSet ) )
3919 main.log.debug( "Actual: " + str( current ) )
3920 getResults = main.FALSE
3921 else:
3922 # error, set is not a set
3923 main.log.error( "ONOS" + node +
3924 " has repeat elements in" +
3925 " set " + onosSetName + ":\n" +
3926 str( getResponses[ i ] ) )
3927 getResults = main.FALSE
3928 elif getResponses[ i ] == main.ERROR:
3929 getResults = main.FALSE
3930 sizeResponses = []
3931 threads = []
3932 for i in main.activeNodes:
3933 t = main.Thread( target=main.CLIs[i].setTestSize,
3934 name="setTestSize-" + str( i ),
3935 args=[ onosSetName ] )
3936 threads.append( t )
3937 t.start()
3938 for t in threads:
3939 t.join()
3940 sizeResponses.append( t.result )
3941 sizeResults = main.TRUE
3942 for i in range( len( main.activeNodes ) ):
3943 node = str( main.activeNodes[i] + 1 )
3944 if size != sizeResponses[ i ]:
3945 sizeResults = main.FALSE
3946 main.log.error( "ONOS" + node +
3947 " expected a size of " + str( size ) +
3948 " for set " + onosSetName +
3949 " but got " + str( sizeResponses[ i ] ) )
3950 addAllResults = addAllResults and getResults and sizeResults
3951 utilities.assert_equals( expect=main.TRUE,
3952 actual=addAllResults,
3953 onpass="Set addAll correct",
3954 onfail="Set addAll was incorrect" )
3955
3956 main.step( "Distributed Set clear()" )
3957 onosSet.clear()
3958 clearResponses = []
3959 threads = []
3960 for i in main.activeNodes:
3961 t = main.Thread( target=main.CLIs[i].setTestRemove,
3962 name="setTestClear-" + str( i ),
3963 args=[ onosSetName, " "], # Values doesn't matter
3964 kwargs={ "clear": True } )
3965 threads.append( t )
3966 t.start()
3967 for t in threads:
3968 t.join()
3969 clearResponses.append( t.result )
3970
3971 # main.TRUE = successfully changed the set
3972 # main.FALSE = action resulted in no change in set
3973 # main.ERROR - Some error in executing the function
3974 clearResults = main.TRUE
3975 for i in range( len( main.activeNodes ) ):
3976 if clearResponses[ i ] == main.TRUE:
3977 # All is well
3978 pass
3979 elif clearResponses[ i ] == main.FALSE:
3980 # Nothing set, probably fine
3981 pass
3982 elif clearResponses[ i ] == main.ERROR:
3983 # Error in execution
3984 clearResults = main.FALSE
3985 else:
3986 # unexpected result
3987 clearResults = main.FALSE
3988 if clearResults != main.TRUE:
3989 main.log.error( "Error executing set clear" )
3990
3991 # Check if set is still correct
3992 size = len( onosSet )
3993 getResponses = []
3994 threads = []
3995 for i in main.activeNodes:
3996 t = main.Thread( target=main.CLIs[i].setTestGet,
3997 name="setTestGet-" + str( i ),
3998 args=[ onosSetName ] )
3999 threads.append( t )
4000 t.start()
4001 for t in threads:
4002 t.join()
4003 getResponses.append( t.result )
4004 getResults = main.TRUE
4005 for i in range( len( main.activeNodes ) ):
4006 node = str( main.activeNodes[i] + 1 )
4007 if isinstance( getResponses[ i ], list):
4008 current = set( getResponses[ i ] )
4009 if len( current ) == len( getResponses[ i ] ):
4010 # no repeats
4011 if onosSet != current:
4012 main.log.error( "ONOS" + node +
4013 " has incorrect view" +
4014 " of set " + onosSetName + ":\n" +
4015 str( getResponses[ i ] ) )
4016 main.log.debug( "Expected: " + str( onosSet ) )
4017 main.log.debug( "Actual: " + str( current ) )
4018 getResults = main.FALSE
4019 else:
4020 # error, set is not a set
4021 main.log.error( "ONOS" + node +
4022 " has repeat elements in" +
4023 " set " + onosSetName + ":\n" +
4024 str( getResponses[ i ] ) )
4025 getResults = main.FALSE
4026 elif getResponses[ i ] == main.ERROR:
4027 getResults = main.FALSE
4028 sizeResponses = []
4029 threads = []
4030 for i in main.activeNodes:
4031 t = main.Thread( target=main.CLIs[i].setTestSize,
4032 name="setTestSize-" + str( i ),
4033 args=[ onosSetName ] )
4034 threads.append( t )
4035 t.start()
4036 for t in threads:
4037 t.join()
4038 sizeResponses.append( t.result )
4039 sizeResults = main.TRUE
4040 for i in range( len( main.activeNodes ) ):
4041 node = str( main.activeNodes[i] + 1 )
4042 if size != sizeResponses[ i ]:
4043 sizeResults = main.FALSE
4044 main.log.error( "ONOS" + node +
4045 " expected a size of " + str( size ) +
4046 " for set " + onosSetName +
4047 " but got " + str( sizeResponses[ i ] ) )
4048 clearResults = clearResults and getResults and sizeResults
4049 utilities.assert_equals( expect=main.TRUE,
4050 actual=clearResults,
4051 onpass="Set clear correct",
4052 onfail="Set clear was incorrect" )
4053
4054 main.step( "Distributed Set addAll()" )
4055 onosSet.update( addAllValue.split() )
4056 addResponses = []
4057 threads = []
4058 for i in main.activeNodes:
4059 t = main.Thread( target=main.CLIs[i].setTestAdd,
4060 name="setTestAddAll-" + str( i ),
4061 args=[ onosSetName, addAllValue ] )
4062 threads.append( t )
4063 t.start()
4064 for t in threads:
4065 t.join()
4066 addResponses.append( t.result )
4067
4068 # main.TRUE = successfully changed the set
4069 # main.FALSE = action resulted in no change in set
4070 # main.ERROR - Some error in executing the function
4071 addAllResults = main.TRUE
4072 for i in range( len( main.activeNodes ) ):
4073 if addResponses[ i ] == main.TRUE:
4074 # All is well
4075 pass
4076 elif addResponses[ i ] == main.FALSE:
4077 # Already in set, probably fine
4078 pass
4079 elif addResponses[ i ] == main.ERROR:
4080 # Error in execution
4081 addAllResults = main.FALSE
4082 else:
4083 # unexpected result
4084 addAllResults = main.FALSE
4085 if addAllResults != main.TRUE:
4086 main.log.error( "Error executing set addAll" )
4087
4088 # Check if set is still correct
4089 size = len( onosSet )
4090 getResponses = []
4091 threads = []
4092 for i in main.activeNodes:
4093 t = main.Thread( target=main.CLIs[i].setTestGet,
4094 name="setTestGet-" + str( i ),
4095 args=[ onosSetName ] )
4096 threads.append( t )
4097 t.start()
4098 for t in threads:
4099 t.join()
4100 getResponses.append( t.result )
4101 getResults = main.TRUE
4102 for i in range( len( main.activeNodes ) ):
4103 node = str( main.activeNodes[i] + 1 )
4104 if isinstance( getResponses[ i ], list):
4105 current = set( getResponses[ i ] )
4106 if len( current ) == len( getResponses[ i ] ):
4107 # no repeats
4108 if onosSet != current:
4109 main.log.error( "ONOS" + node +
4110 " has incorrect view" +
4111 " of set " + onosSetName + ":\n" +
4112 str( getResponses[ i ] ) )
4113 main.log.debug( "Expected: " + str( onosSet ) )
4114 main.log.debug( "Actual: " + str( current ) )
4115 getResults = main.FALSE
4116 else:
4117 # error, set is not a set
4118 main.log.error( "ONOS" + node +
4119 " has repeat elements in" +
4120 " set " + onosSetName + ":\n" +
4121 str( getResponses[ i ] ) )
4122 getResults = main.FALSE
4123 elif getResponses[ i ] == main.ERROR:
4124 getResults = main.FALSE
4125 sizeResponses = []
4126 threads = []
4127 for i in main.activeNodes:
4128 t = main.Thread( target=main.CLIs[i].setTestSize,
4129 name="setTestSize-" + str( i ),
4130 args=[ onosSetName ] )
4131 threads.append( t )
4132 t.start()
4133 for t in threads:
4134 t.join()
4135 sizeResponses.append( t.result )
4136 sizeResults = main.TRUE
4137 for i in range( len( main.activeNodes ) ):
4138 node = str( main.activeNodes[i] + 1 )
4139 if size != sizeResponses[ i ]:
4140 sizeResults = main.FALSE
4141 main.log.error( "ONOS" + node +
4142 " expected a size of " + str( size ) +
4143 " for set " + onosSetName +
4144 " but got " + str( sizeResponses[ i ] ) )
4145 addAllResults = addAllResults and getResults and sizeResults
4146 utilities.assert_equals( expect=main.TRUE,
4147 actual=addAllResults,
4148 onpass="Set addAll correct",
4149 onfail="Set addAll was incorrect" )
4150
4151 main.step( "Distributed Set retain()" )
4152 onosSet.intersection_update( retainValue.split() )
4153 retainResponses = []
4154 threads = []
4155 for i in main.activeNodes:
4156 t = main.Thread( target=main.CLIs[i].setTestRemove,
4157 name="setTestRetain-" + str( i ),
4158 args=[ onosSetName, retainValue ],
4159 kwargs={ "retain": True } )
4160 threads.append( t )
4161 t.start()
4162 for t in threads:
4163 t.join()
4164 retainResponses.append( t.result )
4165
4166 # main.TRUE = successfully changed the set
4167 # main.FALSE = action resulted in no change in set
4168 # main.ERROR - Some error in executing the function
4169 retainResults = main.TRUE
4170 for i in range( len( main.activeNodes ) ):
4171 if retainResponses[ i ] == main.TRUE:
4172 # All is well
4173 pass
4174 elif retainResponses[ i ] == main.FALSE:
4175 # Already in set, probably fine
4176 pass
4177 elif retainResponses[ i ] == main.ERROR:
4178 # Error in execution
4179 retainResults = main.FALSE
4180 else:
4181 # unexpected result
4182 retainResults = main.FALSE
4183 if retainResults != main.TRUE:
4184 main.log.error( "Error executing set retain" )
4185
4186 # Check if set is still correct
4187 size = len( onosSet )
4188 getResponses = []
4189 threads = []
4190 for i in main.activeNodes:
4191 t = main.Thread( target=main.CLIs[i].setTestGet,
4192 name="setTestGet-" + str( i ),
4193 args=[ onosSetName ] )
4194 threads.append( t )
4195 t.start()
4196 for t in threads:
4197 t.join()
4198 getResponses.append( t.result )
4199 getResults = main.TRUE
4200 for i in range( len( main.activeNodes ) ):
4201 node = str( main.activeNodes[i] + 1 )
4202 if isinstance( getResponses[ i ], list):
4203 current = set( getResponses[ i ] )
4204 if len( current ) == len( getResponses[ i ] ):
4205 # no repeats
4206 if onosSet != current:
4207 main.log.error( "ONOS" + node +
4208 " has incorrect view" +
4209 " of set " + onosSetName + ":\n" +
4210 str( getResponses[ i ] ) )
4211 main.log.debug( "Expected: " + str( onosSet ) )
4212 main.log.debug( "Actual: " + str( current ) )
4213 getResults = main.FALSE
4214 else:
4215 # error, set is not a set
4216 main.log.error( "ONOS" + node +
4217 " has repeat elements in" +
4218 " set " + onosSetName + ":\n" +
4219 str( getResponses[ i ] ) )
4220 getResults = main.FALSE
4221 elif getResponses[ i ] == main.ERROR:
4222 getResults = main.FALSE
4223 sizeResponses = []
4224 threads = []
4225 for i in main.activeNodes:
4226 t = main.Thread( target=main.CLIs[i].setTestSize,
4227 name="setTestSize-" + str( i ),
4228 args=[ onosSetName ] )
4229 threads.append( t )
4230 t.start()
4231 for t in threads:
4232 t.join()
4233 sizeResponses.append( t.result )
4234 sizeResults = main.TRUE
4235 for i in range( len( main.activeNodes ) ):
4236 node = str( main.activeNodes[i] + 1 )
4237 if size != sizeResponses[ i ]:
4238 sizeResults = main.FALSE
4239 main.log.error( "ONOS" + node + " expected a size of " +
4240 str( size ) + " for set " + onosSetName +
4241 " but got " + str( sizeResponses[ i ] ) )
4242 retainResults = retainResults and getResults and sizeResults
4243 utilities.assert_equals( expect=main.TRUE,
4244 actual=retainResults,
4245 onpass="Set retain correct",
4246 onfail="Set retain was incorrect" )
4247
4248 # Transactional maps
4249 main.step( "Partitioned Transactional maps put" )
4250 tMapValue = "Testing"
4251 numKeys = 100
4252 putResult = True
4253 node = main.activeNodes[0]
4254 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4255 if putResponses and len( putResponses ) == 100:
4256 for i in putResponses:
4257 if putResponses[ i ][ 'value' ] != tMapValue:
4258 putResult = False
4259 else:
4260 putResult = False
4261 if not putResult:
4262 main.log.debug( "Put response values: " + str( putResponses ) )
4263 utilities.assert_equals( expect=True,
4264 actual=putResult,
4265 onpass="Partitioned Transactional Map put successful",
4266 onfail="Partitioned Transactional Map put values are incorrect" )
4267
4268 main.step( "Partitioned Transactional maps get" )
Jon Hall9bfadd22016-05-11 14:48:07 -07004269 # FIXME: is this sleep needed?
4270 time.sleep( 5 )
4271
Jon Hall9ebd1bd2016-04-19 01:37:17 -07004272 getCheck = True
4273 for n in range( 1, numKeys + 1 ):
4274 getResponses = []
4275 threads = []
4276 valueCheck = True
4277 for i in main.activeNodes:
4278 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4279 name="TMap-get-" + str( i ),
4280 args=[ "Key" + str( n ) ] )
4281 threads.append( t )
4282 t.start()
4283 for t in threads:
4284 t.join()
4285 getResponses.append( t.result )
4286 for node in getResponses:
4287 if node != tMapValue:
4288 valueCheck = False
4289 if not valueCheck:
4290 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4291 main.log.warn( getResponses )
4292 getCheck = getCheck and valueCheck
4293 utilities.assert_equals( expect=True,
4294 actual=getCheck,
4295 onpass="Partitioned Transactional Map get values were correct",
4296 onfail="Partitioned Transactional Map values incorrect" )