"""
2Description: This test is to determine if ONOS can handle
3 dynamic scaling of the cluster size.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: The scaling case.
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAscaling:
28
    def __init__( self ):
        # Placeholder attribute; the test harness instantiates this class
        # with no arguments, so nothing else is initialized here.
        self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAscaling.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Hall8f6d4622016-05-23 15:27:18 -0700133 port = main.params['server']['port']
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 main.scaling = main.params['scaling'].split( "," )
146 main.log.debug( main.scaling )
147 scale = main.scaling.pop(0)
148 main.log.debug( scale)
149 if "e" in scale:
150 equal = True
151 else:
152 equal = False
153 main.log.debug( equal)
154 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
155 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
156 utilities.assert_equals( expect=main.TRUE, actual=genResult,
157 onpass="New cluster metadata file generated",
158 onfail="Failled to generate new metadata file" )
159
160 cleanInstallResult = main.TRUE
161 gitPullResult = main.TRUE
162
163 main.step( "Starting Mininet" )
164 # scp topo file to mininet
165 # TODO: move to params?
166 topoName = "obelisk.py"
167 filePath = main.ONOSbench.home + "/tools/test/topos/"
168 main.ONOSbench.scp( main.Mininet1,
169 filePath + topoName,
170 main.Mininet1.home,
171 direction="to" )
172 mnResult = main.Mininet1.startNet( )
173 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
174 onpass="Mininet Started",
175 onfail="Error starting Mininet" )
176
177 main.step( "Git checkout and pull " + gitBranch )
178 if PULLCODE:
179 main.ONOSbench.gitCheckout( gitBranch )
180 gitPullResult = main.ONOSbench.gitPull()
181 # values of 1 or 3 are good
182 utilities.assert_lesser( expect=0, actual=gitPullResult,
183 onpass="Git pull successful",
184 onfail="Git pull failed" )
185 main.ONOSbench.getVersion( report=True )
186
187 main.step( "Using mvn clean install" )
188 cleanInstallResult = main.TRUE
189 if PULLCODE and gitPullResult == main.TRUE:
190 cleanInstallResult = main.ONOSbench.cleanInstall()
191 else:
192 main.log.warn( "Did not pull new code so skipping mvn " +
193 "clean install" )
194 utilities.assert_equals( expect=main.TRUE,
195 actual=cleanInstallResult,
196 onpass="MCI successful",
197 onfail="MCI failed" )
198 # GRAPHS
199 # NOTE: important params here:
200 # job = name of Jenkins job
201 # Plot Name = Plot-HA, only can be used if multiple plots
202 # index = The number of the graph under plot name
203 job = "HAscaling"
204 plotName = "Plot-HA"
205 index = "0"
206 graphs = '<ac:structured-macro ac:name="html">\n'
207 graphs += '<ac:plain-text-body><![CDATA[\n'
208 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
209 '/plot/' + plotName + '/getPlot?index=' + index +\
210 '&width=500&height=300"' +\
211 'noborder="0" width="500" height="300" scrolling="yes" ' +\
212 'seamless="seamless"></iframe>\n'
213 graphs += ']]></ac:plain-text-body>\n'
214 graphs += '</ac:structured-macro>\n'
215 main.log.wiki(graphs)
216
217 main.step( "Copying backup config files" )
218 path = "~/onos/tools/package/bin/onos-service"
219 cp = main.ONOSbench.scp( main.ONOSbench,
220 path,
221 path + ".backup",
222 direction="to" )
223
224 utilities.assert_equals( expect=main.TRUE,
225 actual=cp,
226 onpass="Copy backup config file succeeded",
227 onfail="Copy backup config file failed" )
228 # we need to modify the onos-service file to use remote metadata file
229 # url for cluster metadata file
Jon Hall8f6d4622016-05-23 15:27:18 -0700230 iface = main.params['server'].get( 'interface' )
231 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700232 metaFile = "cluster.json"
233 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
234 main.log.warn( javaArgs )
235 main.log.warn( repr( javaArgs ) )
236 handle = main.ONOSbench.handle
237 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
238 main.log.warn( sed )
239 main.log.warn( repr( sed ) )
240 handle.sendline( sed )
241 handle.expect( "\$" )
242 main.log.debug( repr( handle.before ) )
243
244 main.step( "Creating ONOS package" )
245 packageResult = main.ONOSbench.onosPackage()
246 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
247 onpass="ONOS package successful",
248 onfail="ONOS package failed" )
249
250 main.step( "Installing ONOS package" )
251 onosInstallResult = main.TRUE
252 for i in range( main.ONOSbench.maxNodes ):
253 node = main.nodes[i]
254 options = "-f"
255 if i >= main.numCtrls:
256 options = "-nf" # Don't start more than the current scale
257 tmpResult = main.ONOSbench.onosInstall( options=options,
258 node=node.ip_address )
259 onosInstallResult = onosInstallResult and tmpResult
260 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
261 onpass="ONOS install successful",
262 onfail="ONOS install failed" )
263
264 # Cleanup custom onos-service file
265 main.ONOSbench.scp( main.ONOSbench,
266 path + ".backup",
267 path,
268 direction="to" )
269
270 main.step( "Checking if ONOS is up yet" )
271 for i in range( 2 ):
272 onosIsupResult = main.TRUE
273 for i in range( main.numCtrls ):
274 node = main.nodes[i]
275 started = main.ONOSbench.isup( node.ip_address )
276 if not started:
277 main.log.error( node.name + " hasn't started" )
278 onosIsupResult = onosIsupResult and started
279 if onosIsupResult == main.TRUE:
280 break
281 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
282 onpass="ONOS startup successful",
283 onfail="ONOS startup failed" )
284
Jon Hall6509dbf2016-06-21 17:01:17 -0700285 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700286 cliResults = main.TRUE
287 threads = []
288 for i in range( main.numCtrls ):
289 t = main.Thread( target=main.CLIs[i].startOnosCli,
290 name="startOnosCli-" + str( i ),
291 args=[main.nodes[i].ip_address] )
292 threads.append( t )
293 t.start()
294
295 for t in threads:
296 t.join()
297 cliResults = cliResults and t.result
298 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
299 onpass="ONOS cli startup successful",
300 onfail="ONOS cli startup failed" )
301
302 # Create a list of active nodes for use when some nodes are stopped
303 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
304
305 if main.params[ 'tcpdump' ].lower() == "true":
306 main.step( "Start Packet Capture MN" )
307 main.Mininet2.startTcpdump(
308 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
309 + "-MN.pcap",
310 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
311 port=main.params[ 'MNtcpdump' ][ 'port' ] )
312
313 main.step( "Checking ONOS nodes" )
314 nodeResults = utilities.retry( main.HA.nodesCheck,
315 False,
316 args=[main.activeNodes],
317 attempts=5 )
318 utilities.assert_equals( expect=True, actual=nodeResults,
319 onpass="Nodes check successful",
320 onfail="Nodes check NOT successful" )
321
322 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700323 for i in main.activeNodes:
324 cli = main.CLIs[i]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700325 main.log.debug( "{} components not ACTIVE: \n{}".format(
326 cli.name,
327 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700328 main.log.error( "Failed to start ONOS, stopping test" )
329 main.cleanup()
330 main.exit()
331
332 main.step( "Activate apps defined in the params file" )
333 # get data from the params
334 apps = main.params.get( 'apps' )
335 if apps:
336 apps = apps.split(',')
337 main.log.warn( apps )
338 activateResult = True
339 for app in apps:
340 main.CLIs[ 0 ].app( app, "Activate" )
341 # TODO: check this worked
342 time.sleep( 10 ) # wait for apps to activate
343 for app in apps:
344 state = main.CLIs[ 0 ].appStatus( app )
345 if state == "ACTIVE":
346 activateResult = activateResult and True
347 else:
348 main.log.error( "{} is in {} state".format( app, state ) )
349 activateResult = False
350 utilities.assert_equals( expect=True,
351 actual=activateResult,
352 onpass="Successfully activated apps",
353 onfail="Failed to activate apps" )
354 else:
355 main.log.warn( "No apps were specified to be loaded after startup" )
356
357 main.step( "Set ONOS configurations" )
358 config = main.params.get( 'ONOS_Configuration' )
359 if config:
360 main.log.debug( config )
361 checkResult = main.TRUE
362 for component in config:
363 for setting in config[component]:
364 value = config[component][setting]
365 check = main.CLIs[ 0 ].setCfg( component, setting, value )
366 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
367 checkResult = check and checkResult
368 utilities.assert_equals( expect=main.TRUE,
369 actual=checkResult,
370 onpass="Successfully set config",
371 onfail="Failed to set config" )
372 else:
373 main.log.warn( "No configurations were specified to be changed after startup" )
374
375 main.step( "App Ids check" )
376 appCheck = main.TRUE
377 threads = []
378 for i in main.activeNodes:
379 t = main.Thread( target=main.CLIs[i].appToIDCheck,
380 name="appToIDCheck-" + str( i ),
381 args=[] )
382 threads.append( t )
383 t.start()
384
385 for t in threads:
386 t.join()
387 appCheck = appCheck and t.result
388 if appCheck != main.TRUE:
389 node = main.activeNodes[0]
390 main.log.warn( main.CLIs[node].apps() )
391 main.log.warn( main.CLIs[node].appIDs() )
392 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
393 onpass="App Ids seem to be correct",
394 onfail="Something is wrong with app Ids" )
395
396 def CASE2( self, main ):
397 """
398 Assign devices to controllers
399 """
400 import re
401 assert main.numCtrls, "main.numCtrls not defined"
402 assert main, "main not defined"
403 assert utilities.assert_equals, "utilities.assert_equals not defined"
404 assert main.CLIs, "main.CLIs not defined"
405 assert main.nodes, "main.nodes not defined"
406
407 main.case( "Assigning devices to controllers" )
408 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
409 "and check that an ONOS node becomes the " +\
410 "master of the device."
411 main.step( "Assign switches to controllers" )
412
413 ipList = []
414 for i in range( main.ONOSbench.maxNodes ):
415 ipList.append( main.nodes[ i ].ip_address )
416 swList = []
417 for i in range( 1, 29 ):
418 swList.append( "s" + str( i ) )
419 main.Mininet1.assignSwController( sw=swList, ip=ipList )
420
421 mastershipCheck = main.TRUE
422 for i in range( 1, 29 ):
423 response = main.Mininet1.getSwController( "s" + str( i ) )
424 try:
425 main.log.info( str( response ) )
426 except Exception:
427 main.log.info( repr( response ) )
428 for node in main.nodes:
429 if re.search( "tcp:" + node.ip_address, response ):
430 mastershipCheck = mastershipCheck and main.TRUE
431 else:
432 main.log.error( "Error, node " + node.ip_address + " is " +
433 "not in the list of controllers s" +
434 str( i ) + " is connecting to." )
435 mastershipCheck = main.FALSE
436 utilities.assert_equals(
437 expect=main.TRUE,
438 actual=mastershipCheck,
439 onpass="Switch mastership assigned correctly",
440 onfail="Switches not assigned correctly to controllers" )
441
    def CASE21( self, main ):
        """
        Assign mastership to controllers.

        Pins each of the 28 switches to a chosen ONOS node with the
        'device-role' command, then re-reads each device's mastership to
        verify the assignment took effect.
        """
        import time
        # Sanity-check state that CASE1 is expected to have set up
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI queries go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster because each node index is taken modulo main.numCtrls.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() may return None, so .get( 'id' ) can raise
            # AttributeError; the assert above raises AssertionError
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
556
    def CASE3( self, main ):
        """
        Assign intents.

        Discovers hosts via reactive-forwarding pingall, installs ten
        host-to-host intents, then verifies the intents reach INSTALLED
        state on every active node and measures how long the anti-entropy
        gossip takes to disperse them.
        """
        import time
        import json
        # Sanity-check state that CASE1 is expected to have set up
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # labels/data are module-level globals for Jenkins plotting,
        # normally created in CASE1; fall back to empty lists if missing
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        # NOTE: we must reinstall intents until we have a persistent intent
        #       datastore!
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Run appToIDCheck on every active node in parallel
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        # Re-verify app ids after deactivating fwd
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        # Pair host h<i> with h<i+10>; MACs in this topology follow the
        # host number (e.g. h8 -> 00:00:00:00:00:08)
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Spread intent submissions across the active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        # Record submission time to measure gossip dispersion later
        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Check leadership of the intent partition topics
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to 100 times, 1s apart) until every active node reports
        # the same intent IDs, all in INSTALLED state
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is global so later cases can reference the measurement
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        # Record the measurement under a unique "Gossip IntentsN" label
        # for the Jenkins csv plot
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in labels:
                labels.append( curTitle )
                data.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        # Dispersion should finish within one gossip period per active node
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # If intents were missing or pending, wait a minute and re-collect
        # the same diagnostics one more time
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
975
    def CASE4( self, main ):
        """
        Ping across added host intents

        Verifies dataplane connectivity for the host intents added earlier:
        polls until all intents report INSTALLED (up to 40 one-second
        retries), pings between the paired hosts h8<->h18 ... h17<->h27,
        and checks topic leadership for the intent partitions. On failure,
        dumps intents, leaders, partitions, and the pending map from the
        first active ONOS node for debugging. If some intents never
        installed, waits an extra 60 seconds and repeats the pings.

        Args:
            main: TestON test context providing CLIs, Mininet drivers,
                  logging, and step/assert helpers.
        """
        import json
        import time
        # Sanity-check that earlier cases set up the test context
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All CLI queries in this case go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, at most 40 tries ~1s apart
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        # Each intent connects host hN to h(N+10); ping every pair
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            # NOTE(review): if onosCli.intents() itself raises, tmpIntents
            # is unbound in the except handler below — verify driver never
            # raises ValueError/TypeError before assignment
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # Expect one leadership topic per intent partition (14 total)
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Dump raw leaders output from every active node for comparison
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Retry path: give intents another 60s to settle, dump state again,
        # then repeat the full ping sweep
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1257
    def CASE5( self, main ):
        """
        Reading state of ONOS

        Takes a snapshot of cluster state from every active ONOS node —
        device mastership, intents, flows, OF table dumps, and topology
        ( devices, hosts, ports, links, clusters ) — and checks that the
        views are consistent across nodes and match Mininet. Saves the
        agreed-upon state into module globals ( mastershipState,
        intentState, flowState, flows ) for comparison after the scaling
        event, and starts long-running background pings used by later
        cases.

        Args:
            main: TestON test context providing CLIs, Mininet drivers,
                  logging, and step/assert helpers.
        """
        import json
        import time
        # Sanity-check that earlier cases set up the test context
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        # Query every active node in parallel via worker threads
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            # 1-indexed ONOS node label for log messages
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's raw roles output must match the first node's
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's view to aid debugging the inconsistency
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Save the agreed-upon mastership for post-scaling comparison
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() here sorts the characters of the raw JSON
        # string, so this compares content ignoring ordering of the text —
        # verify this is the intended comparison
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1        ONOS2 ...
            # 0x01        INSTALLED    INSTALLING
            # ...         ...          ...
            # ...         ...          ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Save the agreed-upon intents for post-scaling comparison
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                # Keep list positions aligned with node order
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Only flow COUNTS are compared, not flow contents
        # NOTE(review): if any entry is None ( parse failure above ),
        # len( None ) raises an uncaught TypeError here — verify
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            # Save the agreed-upon flows for post-scaling comparison
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        # Dump OpenFlow 1.3 tables from switches s1..s28
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Background pings ( 500s each ) between host pairs from the params
        # file; these keep traffic flowing across the scaling event
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Gather devices, hosts, ports, links, and clusters from each node,
        # one threaded query sweep per data type
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                # Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): 'controller' here is a node id used as a list
        # index; this only lines up while activeNodes == range( n ) —
        # verify after a scaling event changes the node set
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1814
1815 def CASE6( self, main ):
1816 """
1817 The Scaling case.
1818 """
1819 import time
1820 import re
1821 assert main.numCtrls, "main.numCtrls not defined"
1822 assert main, "main not defined"
1823 assert utilities.assert_equals, "utilities.assert_equals not defined"
1824 assert main.CLIs, "main.CLIs not defined"
1825 assert main.nodes, "main.nodes not defined"
1826 try:
1827 labels
1828 except NameError:
1829 main.log.error( "labels not defined, setting to []" )
1830 global labels
1831 labels = []
1832 try:
1833 data
1834 except NameError:
1835 main.log.error( "data not defined, setting to []" )
1836 global data
1837 data = []
1838
Jon Hall69b2b982016-05-11 12:04:59 -07001839 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001840
1841 main.step( "Checking ONOS Logs for errors" )
1842 for i in main.activeNodes:
1843 node = main.nodes[i]
1844 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1845 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1846
1847 """
1848 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1849 modify cluster.json file appropriately
1850 install/deactivate node as needed
1851 """
1852
1853 try:
1854 prevNodes = main.activeNodes
1855 scale = main.scaling.pop(0)
1856 if "e" in scale:
1857 equal = True
1858 else:
1859 equal = False
1860 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1861 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1862 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1863 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1864 onpass="New cluster metadata file generated",
1865 onfail="Failled to generate new metadata file" )
1866 time.sleep( 5 ) # Give time for nodes to read new file
1867 except IndexError:
1868 main.cleanup()
1869 main.exit()
1870
1871 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1872 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1873
1874 main.step( "Start new nodes" ) # OR stop old nodes?
1875 started = main.TRUE
1876 for i in newNodes:
1877 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1878 utilities.assert_equals( expect=main.TRUE, actual=started,
1879 onpass="ONOS started",
1880 onfail="ONOS start NOT successful" )
1881
1882 main.step( "Checking if ONOS is up yet" )
1883 for i in range( 2 ):
1884 onosIsupResult = main.TRUE
1885 for i in main.activeNodes:
1886 node = main.nodes[i]
1887 started = main.ONOSbench.isup( node.ip_address )
1888 if not started:
1889 main.log.error( node.name + " didn't start!" )
1890 onosIsupResult = onosIsupResult and started
1891 if onosIsupResult == main.TRUE:
1892 break
1893 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1894 onpass="ONOS started",
1895 onfail="ONOS start NOT successful" )
1896
Jon Hall6509dbf2016-06-21 17:01:17 -07001897 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001898 cliResults = main.TRUE
1899 threads = []
1900 for i in main.activeNodes:
1901 t = main.Thread( target=main.CLIs[i].startOnosCli,
1902 name="startOnosCli-" + str( i ),
1903 args=[main.nodes[i].ip_address] )
1904 threads.append( t )
1905 t.start()
1906
1907 for t in threads:
1908 t.join()
1909 cliResults = cliResults and t.result
1910 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1911 onpass="ONOS cli started",
1912 onfail="ONOS clis did not start" )
1913
1914 main.step( "Checking ONOS nodes" )
1915 nodeResults = utilities.retry( main.HA.nodesCheck,
1916 False,
1917 args=[main.activeNodes],
1918 attempts=5 )
1919 utilities.assert_equals( expect=True, actual=nodeResults,
1920 onpass="Nodes check successful",
1921 onfail="Nodes check NOT successful" )
1922
1923 for i in range( 10 ):
1924 ready = True
1925 for i in main.activeNodes:
1926 cli = main.CLIs[i]
1927 output = cli.summary()
1928 if not output:
1929 ready = False
1930 if ready:
1931 break
1932 time.sleep( 30 )
1933 utilities.assert_equals( expect=True, actual=ready,
1934 onpass="ONOS summary command succeded",
1935 onfail="ONOS summary command failed" )
1936 if not ready:
1937 main.cleanup()
1938 main.exit()
1939
1940 # Rerun for election on new nodes
1941 runResults = main.TRUE
1942 for i in main.activeNodes:
1943 cli = main.CLIs[i]
1944 run = cli.electionTestRun()
1945 if run != main.TRUE:
1946 main.log.error( "Error running for election on " + cli.name )
1947 runResults = runResults and run
1948 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1949 onpass="Reran for election",
1950 onfail="Failed to rerun for election" )
1951
1952 # TODO: Make this configurable
1953 time.sleep( 60 )
1954 for node in main.activeNodes:
1955 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1956 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1959 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1960
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that after the cluster was resized in CASE6:
          * every switch still has a master,
          * device roles and intents are consistent across nodes,
          * intents and OF flow tables match the pre-scaling snapshot,
          * leadership election still reports a single, agreed leader.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel; AND the results together
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect each node's view of device mastership (raw CLI output)
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report the identical roles output
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's roles for debugging the inconsistency
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted intent strings so ordering differences don't
        # count as inconsistency
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # Print one row per intent id with each node's state
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump each node's full intent list for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # NOTE(review): intentState is a cross-case global presumably
        #       saved by CASE5 — not visible in this method.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not string-equal: fall back to a
                # per-intent membership comparison of the parsed JSON
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # NOTE(review): 'flows' is a cross-case global assumed to hold
        #       the pre-scaling flow tables (saved by an earlier case).
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the background pings started earlier in the test
        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node must report the same, non-None leader
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2269
    def CASE8( self, main ):
        """
        Compare topo.

        Polls devices/hosts/ports/links/clusters from every active ONOS
        node (in parallel, with retries) and compares them against the
        live Mininet topology until they match or the time/retry budget
        ( elapsed < 60s or count < 3 ) is exhausted. Also checks host
        attachment points against the hard-coded obelisk-topology
        mapping, cross-node consistency of hosts and clusters, and that
        there is exactly one strongly connected component.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every node in parallel, retrying on None
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Fetch hosts from every node; results are parsed JSON
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Fetch ports from every node
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Fetch links from every node
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Fetch SCC clusters from every node
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to
            # compare — retry the whole poll
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            # Snapshot the Mininet side once per iteration
            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                                mnSwitches,
                                json.loads( devices[ controller ] ),
                                json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                            mnSwitches, mnLinks,
                            json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                            mnHosts,
                            hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # An empty host list is acceptable here
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
            # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                 controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Final verdict combines every per-node and cross-node check
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within two polling iterations
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump non-ACTIVE karaf components for debugging
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            # The topology never converged; no point continuing the test
            main.cleanup()
            main.exit()
2692
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002693 def CASE9( self, main ):
2694 """
2695 Link s3-s28 down
2696 """
2697 import time
2698 assert main.numCtrls, "main.numCtrls not defined"
2699 assert main, "main not defined"
2700 assert utilities.assert_equals, "utilities.assert_equals not defined"
2701 assert main.CLIs, "main.CLIs not defined"
2702 assert main.nodes, "main.nodes not defined"
2703 # NOTE: You should probably run a topology check after this
2704
2705 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2706
2707 description = "Turn off a link to ensure that Link Discovery " +\
2708 "is working properly"
2709 main.case( description )
2710
2711 main.step( "Kill Link between s3 and s28" )
2712 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2713 main.log.info( "Waiting " + str( linkSleep ) +
2714 " seconds for link down to be discovered" )
2715 time.sleep( linkSleep )
2716 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2717 onpass="Link down successful",
2718 onfail="Failed to bring link down" )
2719 # TODO do some sort of check here
2720
2721 def CASE10( self, main ):
2722 """
2723 Link s3-s28 up
2724 """
2725 import time
2726 assert main.numCtrls, "main.numCtrls not defined"
2727 assert main, "main not defined"
2728 assert utilities.assert_equals, "utilities.assert_equals not defined"
2729 assert main.CLIs, "main.CLIs not defined"
2730 assert main.nodes, "main.nodes not defined"
2731 # NOTE: You should probably run a topology check after this
2732
2733 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2734
2735 description = "Restore a link to ensure that Link Discovery is " + \
2736 "working properly"
2737 main.case( description )
2738
2739 main.step( "Bring link between s3 and s28 back up" )
2740 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2741 main.log.info( "Waiting " + str( linkSleep ) +
2742 " seconds for link up to be discovered" )
2743 time.sleep( linkSleep )
2744 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2745 onpass="Link up successful",
2746 onfail="Failed to bring link up" )
2747 # TODO do some sort of check here
2748
2749 def CASE11( self, main ):
2750 """
2751 Switch Down
2752 """
2753 # NOTE: You should probably run a topology check after this
2754 import time
2755 assert main.numCtrls, "main.numCtrls not defined"
2756 assert main, "main not defined"
2757 assert utilities.assert_equals, "utilities.assert_equals not defined"
2758 assert main.CLIs, "main.CLIs not defined"
2759 assert main.nodes, "main.nodes not defined"
2760
2761 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2762
2763 description = "Killing a switch to ensure it is discovered correctly"
2764 onosCli = main.CLIs[ main.activeNodes[0] ]
2765 main.case( description )
2766 switch = main.params[ 'kill' ][ 'switch' ]
2767 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2768
2769 # TODO: Make this switch parameterizable
2770 main.step( "Kill " + switch )
2771 main.log.info( "Deleting " + switch )
2772 main.Mininet1.delSwitch( switch )
2773 main.log.info( "Waiting " + str( switchSleep ) +
2774 " seconds for switch down to be discovered" )
2775 time.sleep( switchSleep )
2776 device = onosCli.getDevice( dpid=switchDPID )
2777 # Peek at the deleted switch
2778 main.log.warn( str( device ) )
2779 result = main.FALSE
2780 if device and device[ 'available' ] is False:
2781 result = main.TRUE
2782 utilities.assert_equals( expect=main.TRUE, actual=result,
2783 onpass="Kill switch successful",
2784 onfail="Failed to kill switch?" )
2785
2786 def CASE12( self, main ):
2787 """
2788 Switch Up
2789 """
2790 # NOTE: You should probably run a topology check after this
2791 import time
2792 assert main.numCtrls, "main.numCtrls not defined"
2793 assert main, "main not defined"
2794 assert utilities.assert_equals, "utilities.assert_equals not defined"
2795 assert main.CLIs, "main.CLIs not defined"
2796 assert main.nodes, "main.nodes not defined"
2797
2798 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2799 switch = main.params[ 'kill' ][ 'switch' ]
2800 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2801 links = main.params[ 'kill' ][ 'links' ].split()
2802 onosCli = main.CLIs[ main.activeNodes[0] ]
2803 description = "Adding a switch to ensure it is discovered correctly"
2804 main.case( description )
2805
2806 main.step( "Add back " + switch )
2807 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2808 for peer in links:
2809 main.Mininet1.addLink( switch, peer )
2810 ipList = [ node.ip_address for node in main.nodes ]
2811 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2812 main.log.info( "Waiting " + str( switchSleep ) +
2813 " seconds for switch up to be discovered" )
2814 time.sleep( switchSleep )
2815 device = onosCli.getDevice( dpid=switchDPID )
2816 # Peek at the deleted switch
2817 main.log.warn( str( device ) )
2818 result = main.FALSE
2819 if device and device[ 'available' ]:
2820 result = main.TRUE
2821 utilities.assert_equals( expect=main.TRUE, actual=result,
2822 onpass="add switch successful",
2823 onfail="Failed to add switch?" )
2824
2825 def CASE13( self, main ):
2826 """
2827 Clean up
2828 """
2829 assert main.numCtrls, "main.numCtrls not defined"
2830 assert main, "main not defined"
2831 assert utilities.assert_equals, "utilities.assert_equals not defined"
2832 assert main.CLIs, "main.CLIs not defined"
2833 assert main.nodes, "main.nodes not defined"
2834
2835 main.case( "Test Cleanup" )
2836 main.step( "Killing tcpdumps" )
2837 main.Mininet2.stopTcpdump()
2838
2839 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2840 main.step( "Copying MN pcap and ONOS log files to test station" )
2841 # NOTE: MN Pcap file is being saved to logdir.
2842 # We scp this file as MN and TestON aren't necessarily the same vm
2843
2844 # FIXME: To be replaced with a Jenkin's post script
2845 # TODO: Load these from params
2846 # NOTE: must end in /
2847 logFolder = "/opt/onos/log/"
2848 logFiles = [ "karaf.log", "karaf.log.1" ]
2849 # NOTE: must end in /
2850 for f in logFiles:
2851 for node in main.nodes:
2852 dstName = main.logdir + "/" + node.name + "-" + f
2853 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2854 logFolder + f, dstName )
2855 # std*.log's
2856 # NOTE: must end in /
2857 logFolder = "/opt/onos/var/"
2858 logFiles = [ "stderr.log", "stdout.log" ]
2859 # NOTE: must end in /
2860 for f in logFiles:
2861 for node in main.nodes:
2862 dstName = main.logdir + "/" + node.name + "-" + f
2863 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2864 logFolder + f, dstName )
2865 else:
2866 main.log.debug( "skipping saving log files" )
2867
2868 main.step( "Stopping Mininet" )
2869 mnResult = main.Mininet1.stopNet()
2870 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2871 onpass="Mininet stopped",
2872 onfail="MN cleanup NOT successful" )
2873
2874 main.step( "Checking ONOS Logs for errors" )
2875 for node in main.nodes:
2876 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2877 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2878
2879 try:
2880 timerLog = open( main.logdir + "/Timers.csv", 'w')
2881 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2882 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2883 timerLog.close()
2884 except NameError, e:
2885 main.log.exception(e)
2886
2887 main.step( "Stopping webserver" )
2888 status = main.Server.stop( )
2889 utilities.assert_equals( expect=main.TRUE, actual=status,
2890 onpass="Stop Server",
2891 onfail="Failled to stop SimpleHTTPServer" )
2892 del main.Server
2893
2894 def CASE14( self, main ):
2895 """
2896 start election app on all onos nodes
2897 """
2898 import time
2899 assert main.numCtrls, "main.numCtrls not defined"
2900 assert main, "main not defined"
2901 assert utilities.assert_equals, "utilities.assert_equals not defined"
2902 assert main.CLIs, "main.CLIs not defined"
2903 assert main.nodes, "main.nodes not defined"
2904
2905 main.case("Start Leadership Election app")
2906 main.step( "Install leadership election app" )
2907 onosCli = main.CLIs[ main.activeNodes[0] ]
2908 appResult = onosCli.activateApp( "org.onosproject.election" )
2909 utilities.assert_equals(
2910 expect=main.TRUE,
2911 actual=appResult,
2912 onpass="Election app installed",
2913 onfail="Something went wrong with installing Leadership election" )
2914
2915 main.step( "Run for election on each node" )
2916 for i in main.activeNodes:
2917 main.CLIs[i].electionTestRun()
2918 time.sleep(5)
2919 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2920 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2921 utilities.assert_equals(
2922 expect=True,
2923 actual=sameResult,
2924 onpass="All nodes see the same leaderboards",
2925 onfail="Inconsistent leaderboards" )
2926
2927 if sameResult:
2928 leader = leaders[ 0 ][ 0 ]
2929 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2930 correctLeader = True
2931 else:
2932 correctLeader = False
2933 main.step( "First node was elected leader" )
2934 utilities.assert_equals(
2935 expect=True,
2936 actual=correctLeader,
2937 onpass="Correct leader was elected",
2938 onfail="Incorrect leader" )
2939
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional.
            15.1 Run election on each node
            15.2 Check that each node has the same leaders and candidates
            15.3 Find current leader and withdraw
            15.4 Check that a new node was elected leader
            15.5 Check that that new leader was the candidate of old leader
            15.6 Run for election on old leader
            15.7 Check that oldLeader is a candidate, and leader if only 1 node
            15.8 Make sure that the old leader was added to the candidate list

        The "old" and "new" variable prefixes refer to data from before vs
        after the withdrawal, and later to before the withdrawal vs after
        the re-election.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            # With a single node, withdrawing the leader leaves no candidates
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app, none of the later steps make sense
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP address
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no node matched the leader's address
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means no leader at all; only acceptable for 1-node cluster
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # (index 0 is the old leader, index 1 its first backup)
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 candidates: cannot tell who should have won
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3113
3114 def CASE16( self, main ):
3115 """
3116 Install Distributed Primitives app
3117 """
3118 import time
3119 assert main.numCtrls, "main.numCtrls not defined"
3120 assert main, "main not defined"
3121 assert utilities.assert_equals, "utilities.assert_equals not defined"
3122 assert main.CLIs, "main.CLIs not defined"
3123 assert main.nodes, "main.nodes not defined"
3124
3125 # Variables for the distributed primitives tests
3126 global pCounterName
3127 global pCounterValue
3128 global onosSet
3129 global onosSetName
3130 pCounterName = "TestON-Partitions"
3131 pCounterValue = 0
3132 onosSet = set([])
3133 onosSetName = "TestON-set"
3134
3135 description = "Install Primitives app"
3136 main.case( description )
3137 main.step( "Install Primitives app" )
3138 appName = "org.onosproject.distributedprimitives"
3139 node = main.activeNodes[0]
3140 appResults = main.CLIs[node].activateApp( appName )
3141 utilities.assert_equals( expect=main.TRUE,
3142 actual=appResults,
3143 onpass="Primitives app activated",
3144 onfail="Primitives app not activated" )
3145 time.sleep( 5 ) # To allow all nodes to activate
3146
3147 def CASE17( self, main ):
3148 """
3149 Check for basic functionality with distributed primitives
3150 """
3151 # Make sure variables are defined/set
3152 assert main.numCtrls, "main.numCtrls not defined"
3153 assert main, "main not defined"
3154 assert utilities.assert_equals, "utilities.assert_equals not defined"
3155 assert main.CLIs, "main.CLIs not defined"
3156 assert main.nodes, "main.nodes not defined"
3157 assert pCounterName, "pCounterName not defined"
3158 assert onosSetName, "onosSetName not defined"
3159 # NOTE: assert fails if value is 0/None/Empty/False
3160 try:
3161 pCounterValue
3162 except NameError:
3163 main.log.error( "pCounterValue not defined, setting to 0" )
3164 pCounterValue = 0
3165 try:
3166 onosSet
3167 except NameError:
3168 main.log.error( "onosSet not defined, setting to empty Set" )
3169 onosSet = set([])
3170 # Variables for the distributed primitives tests. These are local only
3171 addValue = "a"
3172 addAllValue = "a b c d e f"
3173 retainValue = "c d e f"
3174
3175 description = "Check for basic functionality with distributed " +\
3176 "primitives"
3177 main.case( description )
3178 main.caseExplanation = "Test the methods of the distributed " +\
3179 "primitives (counters and sets) throught the cli"
3180 # DISTRIBUTED ATOMIC COUNTERS
3181 # Partitioned counters
3182 main.step( "Increment then get a default counter on each node" )
3183 pCounters = []
3184 threads = []
3185 addedPValues = []
3186 for i in main.activeNodes:
3187 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3188 name="counterAddAndGet-" + str( i ),
3189 args=[ pCounterName ] )
3190 pCounterValue += 1
3191 addedPValues.append( pCounterValue )
3192 threads.append( t )
3193 t.start()
3194
3195 for t in threads:
3196 t.join()
3197 pCounters.append( t.result )
3198 # Check that counter incremented numController times
3199 pCounterResults = True
3200 for i in addedPValues:
3201 tmpResult = i in pCounters
3202 pCounterResults = pCounterResults and tmpResult
3203 if not tmpResult:
3204 main.log.error( str( i ) + " is not in partitioned "
3205 "counter incremented results" )
3206 utilities.assert_equals( expect=True,
3207 actual=pCounterResults,
3208 onpass="Default counter incremented",
3209 onfail="Error incrementing default" +
3210 " counter" )
3211
3212 main.step( "Get then Increment a default counter on each node" )
3213 pCounters = []
3214 threads = []
3215 addedPValues = []
3216 for i in main.activeNodes:
3217 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3218 name="counterGetAndAdd-" + str( i ),
3219 args=[ pCounterName ] )
3220 addedPValues.append( pCounterValue )
3221 pCounterValue += 1
3222 threads.append( t )
3223 t.start()
3224
3225 for t in threads:
3226 t.join()
3227 pCounters.append( t.result )
3228 # Check that counter incremented numController times
3229 pCounterResults = True
3230 for i in addedPValues:
3231 tmpResult = i in pCounters
3232 pCounterResults = pCounterResults and tmpResult
3233 if not tmpResult:
3234 main.log.error( str( i ) + " is not in partitioned "
3235 "counter incremented results" )
3236 utilities.assert_equals( expect=True,
3237 actual=pCounterResults,
3238 onpass="Default counter incremented",
3239 onfail="Error incrementing default" +
3240 " counter" )
3241
3242 main.step( "Counters we added have the correct values" )
3243 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3244 utilities.assert_equals( expect=main.TRUE,
3245 actual=incrementCheck,
3246 onpass="Added counters are correct",
3247 onfail="Added counters are incorrect" )
3248
3249 main.step( "Add -8 to then get a default counter on each node" )
3250 pCounters = []
3251 threads = []
3252 addedPValues = []
3253 for i in main.activeNodes:
3254 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3255 name="counterIncrement-" + str( i ),
3256 args=[ pCounterName ],
3257 kwargs={ "delta": -8 } )
3258 pCounterValue += -8
3259 addedPValues.append( pCounterValue )
3260 threads.append( t )
3261 t.start()
3262
3263 for t in threads:
3264 t.join()
3265 pCounters.append( t.result )
3266 # Check that counter incremented numController times
3267 pCounterResults = True
3268 for i in addedPValues:
3269 tmpResult = i in pCounters
3270 pCounterResults = pCounterResults and tmpResult
3271 if not tmpResult:
3272 main.log.error( str( i ) + " is not in partitioned "
3273 "counter incremented results" )
3274 utilities.assert_equals( expect=True,
3275 actual=pCounterResults,
3276 onpass="Default counter incremented",
3277 onfail="Error incrementing default" +
3278 " counter" )
3279
3280 main.step( "Add 5 to then get a default counter on each node" )
3281 pCounters = []
3282 threads = []
3283 addedPValues = []
3284 for i in main.activeNodes:
3285 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3286 name="counterIncrement-" + str( i ),
3287 args=[ pCounterName ],
3288 kwargs={ "delta": 5 } )
3289 pCounterValue += 5
3290 addedPValues.append( pCounterValue )
3291 threads.append( t )
3292 t.start()
3293
3294 for t in threads:
3295 t.join()
3296 pCounters.append( t.result )
3297 # Check that counter incremented numController times
3298 pCounterResults = True
3299 for i in addedPValues:
3300 tmpResult = i in pCounters
3301 pCounterResults = pCounterResults and tmpResult
3302 if not tmpResult:
3303 main.log.error( str( i ) + " is not in partitioned "
3304 "counter incremented results" )
3305 utilities.assert_equals( expect=True,
3306 actual=pCounterResults,
3307 onpass="Default counter incremented",
3308 onfail="Error incrementing default" +
3309 " counter" )
3310
3311 main.step( "Get then add 5 to a default counter on each node" )
3312 pCounters = []
3313 threads = []
3314 addedPValues = []
3315 for i in main.activeNodes:
3316 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3317 name="counterIncrement-" + str( i ),
3318 args=[ pCounterName ],
3319 kwargs={ "delta": 5 } )
3320 addedPValues.append( pCounterValue )
3321 pCounterValue += 5
3322 threads.append( t )
3323 t.start()
3324
3325 for t in threads:
3326 t.join()
3327 pCounters.append( t.result )
3328 # Check that counter incremented numController times
3329 pCounterResults = True
3330 for i in addedPValues:
3331 tmpResult = i in pCounters
3332 pCounterResults = pCounterResults and tmpResult
3333 if not tmpResult:
3334 main.log.error( str( i ) + " is not in partitioned "
3335 "counter incremented results" )
3336 utilities.assert_equals( expect=True,
3337 actual=pCounterResults,
3338 onpass="Default counter incremented",
3339 onfail="Error incrementing default" +
3340 " counter" )
3341
3342 main.step( "Counters we added have the correct values" )
3343 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3344 utilities.assert_equals( expect=main.TRUE,
3345 actual=incrementCheck,
3346 onpass="Added counters are correct",
3347 onfail="Added counters are incorrect" )
3348
3349 # DISTRIBUTED SETS
3350 main.step( "Distributed Set get" )
3351 size = len( onosSet )
3352 getResponses = []
3353 threads = []
3354 for i in main.activeNodes:
3355 t = main.Thread( target=main.CLIs[i].setTestGet,
3356 name="setTestGet-" + str( i ),
3357 args=[ onosSetName ] )
3358 threads.append( t )
3359 t.start()
3360 for t in threads:
3361 t.join()
3362 getResponses.append( t.result )
3363
3364 getResults = main.TRUE
3365 for i in range( len( main.activeNodes ) ):
3366 node = str( main.activeNodes[i] + 1 )
3367 if isinstance( getResponses[ i ], list):
3368 current = set( getResponses[ i ] )
3369 if len( current ) == len( getResponses[ i ] ):
3370 # no repeats
3371 if onosSet != current:
3372 main.log.error( "ONOS" + node +
3373 " has incorrect view" +
3374 " of set " + onosSetName + ":\n" +
3375 str( getResponses[ i ] ) )
3376 main.log.debug( "Expected: " + str( onosSet ) )
3377 main.log.debug( "Actual: " + str( current ) )
3378 getResults = main.FALSE
3379 else:
3380 # error, set is not a set
3381 main.log.error( "ONOS" + node +
3382 " has repeat elements in" +
3383 " set " + onosSetName + ":\n" +
3384 str( getResponses[ i ] ) )
3385 getResults = main.FALSE
3386 elif getResponses[ i ] == main.ERROR:
3387 getResults = main.FALSE
3388 utilities.assert_equals( expect=main.TRUE,
3389 actual=getResults,
3390 onpass="Set elements are correct",
3391 onfail="Set elements are incorrect" )
3392
3393 main.step( "Distributed Set size" )
3394 sizeResponses = []
3395 threads = []
3396 for i in main.activeNodes:
3397 t = main.Thread( target=main.CLIs[i].setTestSize,
3398 name="setTestSize-" + str( i ),
3399 args=[ onosSetName ] )
3400 threads.append( t )
3401 t.start()
3402 for t in threads:
3403 t.join()
3404 sizeResponses.append( t.result )
3405
3406 sizeResults = main.TRUE
3407 for i in range( len( main.activeNodes ) ):
3408 node = str( main.activeNodes[i] + 1 )
3409 if size != sizeResponses[ i ]:
3410 sizeResults = main.FALSE
3411 main.log.error( "ONOS" + node +
3412 " expected a size of " + str( size ) +
3413 " for set " + onosSetName +
3414 " but got " + str( sizeResponses[ i ] ) )
3415 utilities.assert_equals( expect=main.TRUE,
3416 actual=sizeResults,
3417 onpass="Set sizes are correct",
3418 onfail="Set sizes are incorrect" )
3419
3420 main.step( "Distributed Set add()" )
3421 onosSet.add( addValue )
3422 addResponses = []
3423 threads = []
3424 for i in main.activeNodes:
3425 t = main.Thread( target=main.CLIs[i].setTestAdd,
3426 name="setTestAdd-" + str( i ),
3427 args=[ onosSetName, addValue ] )
3428 threads.append( t )
3429 t.start()
3430 for t in threads:
3431 t.join()
3432 addResponses.append( t.result )
3433
3434 # main.TRUE = successfully changed the set
3435 # main.FALSE = action resulted in no change in set
3436 # main.ERROR - Some error in executing the function
3437 addResults = main.TRUE
3438 for i in range( len( main.activeNodes ) ):
3439 if addResponses[ i ] == main.TRUE:
3440 # All is well
3441 pass
3442 elif addResponses[ i ] == main.FALSE:
3443 # Already in set, probably fine
3444 pass
3445 elif addResponses[ i ] == main.ERROR:
3446 # Error in execution
3447 addResults = main.FALSE
3448 else:
3449 # unexpected result
3450 addResults = main.FALSE
3451 if addResults != main.TRUE:
3452 main.log.error( "Error executing set add" )
3453
3454 # Check if set is still correct
3455 size = len( onosSet )
3456 getResponses = []
3457 threads = []
3458 for i in main.activeNodes:
3459 t = main.Thread( target=main.CLIs[i].setTestGet,
3460 name="setTestGet-" + str( i ),
3461 args=[ onosSetName ] )
3462 threads.append( t )
3463 t.start()
3464 for t in threads:
3465 t.join()
3466 getResponses.append( t.result )
3467 getResults = main.TRUE
3468 for i in range( len( main.activeNodes ) ):
3469 node = str( main.activeNodes[i] + 1 )
3470 if isinstance( getResponses[ i ], list):
3471 current = set( getResponses[ i ] )
3472 if len( current ) == len( getResponses[ i ] ):
3473 # no repeats
3474 if onosSet != current:
3475 main.log.error( "ONOS" + node + " has incorrect view" +
3476 " of set " + onosSetName + ":\n" +
3477 str( getResponses[ i ] ) )
3478 main.log.debug( "Expected: " + str( onosSet ) )
3479 main.log.debug( "Actual: " + str( current ) )
3480 getResults = main.FALSE
3481 else:
3482 # error, set is not a set
3483 main.log.error( "ONOS" + node + " has repeat elements in" +
3484 " set " + onosSetName + ":\n" +
3485 str( getResponses[ i ] ) )
3486 getResults = main.FALSE
3487 elif getResponses[ i ] == main.ERROR:
3488 getResults = main.FALSE
3489 sizeResponses = []
3490 threads = []
3491 for i in main.activeNodes:
3492 t = main.Thread( target=main.CLIs[i].setTestSize,
3493 name="setTestSize-" + str( i ),
3494 args=[ onosSetName ] )
3495 threads.append( t )
3496 t.start()
3497 for t in threads:
3498 t.join()
3499 sizeResponses.append( t.result )
3500 sizeResults = main.TRUE
3501 for i in range( len( main.activeNodes ) ):
3502 node = str( main.activeNodes[i] + 1 )
3503 if size != sizeResponses[ i ]:
3504 sizeResults = main.FALSE
3505 main.log.error( "ONOS" + node +
3506 " expected a size of " + str( size ) +
3507 " for set " + onosSetName +
3508 " but got " + str( sizeResponses[ i ] ) )
3509 addResults = addResults and getResults and sizeResults
3510 utilities.assert_equals( expect=main.TRUE,
3511 actual=addResults,
3512 onpass="Set add correct",
3513 onfail="Set add was incorrect" )
3514
3515 main.step( "Distributed Set addAll()" )
3516 onosSet.update( addAllValue.split() )
3517 addResponses = []
3518 threads = []
3519 for i in main.activeNodes:
3520 t = main.Thread( target=main.CLIs[i].setTestAdd,
3521 name="setTestAddAll-" + str( i ),
3522 args=[ onosSetName, addAllValue ] )
3523 threads.append( t )
3524 t.start()
3525 for t in threads:
3526 t.join()
3527 addResponses.append( t.result )
3528
3529 # main.TRUE = successfully changed the set
3530 # main.FALSE = action resulted in no change in set
3531 # main.ERROR - Some error in executing the function
3532 addAllResults = main.TRUE
3533 for i in range( len( main.activeNodes ) ):
3534 if addResponses[ i ] == main.TRUE:
3535 # All is well
3536 pass
3537 elif addResponses[ i ] == main.FALSE:
3538 # Already in set, probably fine
3539 pass
3540 elif addResponses[ i ] == main.ERROR:
3541 # Error in execution
3542 addAllResults = main.FALSE
3543 else:
3544 # unexpected result
3545 addAllResults = main.FALSE
3546 if addAllResults != main.TRUE:
3547 main.log.error( "Error executing set addAll" )
3548
3549 # Check if set is still correct
3550 size = len( onosSet )
3551 getResponses = []
3552 threads = []
3553 for i in main.activeNodes:
3554 t = main.Thread( target=main.CLIs[i].setTestGet,
3555 name="setTestGet-" + str( i ),
3556 args=[ onosSetName ] )
3557 threads.append( t )
3558 t.start()
3559 for t in threads:
3560 t.join()
3561 getResponses.append( t.result )
3562 getResults = main.TRUE
3563 for i in range( len( main.activeNodes ) ):
3564 node = str( main.activeNodes[i] + 1 )
3565 if isinstance( getResponses[ i ], list):
3566 current = set( getResponses[ i ] )
3567 if len( current ) == len( getResponses[ i ] ):
3568 # no repeats
3569 if onosSet != current:
3570 main.log.error( "ONOS" + node +
3571 " has incorrect view" +
3572 " of set " + onosSetName + ":\n" +
3573 str( getResponses[ i ] ) )
3574 main.log.debug( "Expected: " + str( onosSet ) )
3575 main.log.debug( "Actual: " + str( current ) )
3576 getResults = main.FALSE
3577 else:
3578 # error, set is not a set
3579 main.log.error( "ONOS" + node +
3580 " has repeat elements in" +
3581 " set " + onosSetName + ":\n" +
3582 str( getResponses[ i ] ) )
3583 getResults = main.FALSE
3584 elif getResponses[ i ] == main.ERROR:
3585 getResults = main.FALSE
3586 sizeResponses = []
3587 threads = []
3588 for i in main.activeNodes:
3589 t = main.Thread( target=main.CLIs[i].setTestSize,
3590 name="setTestSize-" + str( i ),
3591 args=[ onosSetName ] )
3592 threads.append( t )
3593 t.start()
3594 for t in threads:
3595 t.join()
3596 sizeResponses.append( t.result )
3597 sizeResults = main.TRUE
3598 for i in range( len( main.activeNodes ) ):
3599 node = str( main.activeNodes[i] + 1 )
3600 if size != sizeResponses[ i ]:
3601 sizeResults = main.FALSE
3602 main.log.error( "ONOS" + node +
3603 " expected a size of " + str( size ) +
3604 " for set " + onosSetName +
3605 " but got " + str( sizeResponses[ i ] ) )
3606 addAllResults = addAllResults and getResults and sizeResults
3607 utilities.assert_equals( expect=main.TRUE,
3608 actual=addAllResults,
3609 onpass="Set addAll correct",
3610 onfail="Set addAll was incorrect" )
3611
3612 main.step( "Distributed Set contains()" )
3613 containsResponses = []
3614 threads = []
3615 for i in main.activeNodes:
3616 t = main.Thread( target=main.CLIs[i].setTestGet,
3617 name="setContains-" + str( i ),
3618 args=[ onosSetName ],
3619 kwargs={ "values": addValue } )
3620 threads.append( t )
3621 t.start()
3622 for t in threads:
3623 t.join()
3624 # NOTE: This is the tuple
3625 containsResponses.append( t.result )
3626
3627 containsResults = main.TRUE
3628 for i in range( len( main.activeNodes ) ):
3629 if containsResponses[ i ] == main.ERROR:
3630 containsResults = main.FALSE
3631 else:
3632 containsResults = containsResults and\
3633 containsResponses[ i ][ 1 ]
3634 utilities.assert_equals( expect=main.TRUE,
3635 actual=containsResults,
3636 onpass="Set contains is functional",
3637 onfail="Set contains failed" )
3638
3639 main.step( "Distributed Set containsAll()" )
3640 containsAllResponses = []
3641 threads = []
3642 for i in main.activeNodes:
3643 t = main.Thread( target=main.CLIs[i].setTestGet,
3644 name="setContainsAll-" + str( i ),
3645 args=[ onosSetName ],
3646 kwargs={ "values": addAllValue } )
3647 threads.append( t )
3648 t.start()
3649 for t in threads:
3650 t.join()
3651 # NOTE: This is the tuple
3652 containsAllResponses.append( t.result )
3653
3654 containsAllResults = main.TRUE
3655 for i in range( len( main.activeNodes ) ):
3656 if containsResponses[ i ] == main.ERROR:
3657 containsResults = main.FALSE
3658 else:
3659 containsResults = containsResults and\
3660 containsResponses[ i ][ 1 ]
3661 utilities.assert_equals( expect=main.TRUE,
3662 actual=containsAllResults,
3663 onpass="Set containsAll is functional",
3664 onfail="Set containsAll failed" )
3665
        main.step( "Distributed Set remove()" )
        # Remove a single element: update the local reference set first, then
        # have every active node issue the same remove against the ONOS set.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        # Compare each node's view of the set against the local reference set.
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )  # 1-based node label for logs
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size reported by every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall step result: the remove itself plus both consistency checks.
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3762
        main.step( "Distributed Set removeAll()" )
        # Bulk-remove all elements of addAllValue (space-separated list):
        # update the local reference set first, then have every active node
        # issue the same removeAll against the ONOS set.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:  # NOTE: Python 2 except syntax; logged, not re-raised
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        # Compare each node's view of the set against the local reference set.
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )  # 1-based node label for logs
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size reported by every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall step result: the removeAll itself plus both consistency checks.
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3862
        main.step( "Distributed Set addAll()" )
        # Bulk-add all elements of addAllValue: update the local reference set
        # first, then have every active node issue the same addAll.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        # Compare each node's view of the set against the local reference set.
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )  # 1-based node label for logs
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size reported by every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall step result: the addAll itself plus both consistency checks.
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3959
        main.step( "Distributed Set clear()" )
        # Clear the set: empty the local reference set, then have every active
        # node issue a clear (setTestRemove with the "clear" flag set).
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # the value is ignored when clearing
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        # Compare each node's view of the set against the local reference set.
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )  # 1-based node label for logs
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size reported by every node (should be 0 after clear).
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall step result: the clear itself plus both consistency checks.
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4057
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after the clear() step above so that the
        # following retain() step has elements to work with.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        # Compare each node's view of the set against the local reference set.
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )  # 1-based node label for logs
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size reported by every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall step result: the addAll itself plus both consistency checks.
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4154
        main.step( "Distributed Set retain()" )
        # Retain only the elements in retainValue (set intersection): update
        # the local reference set first, then have every active node issue the
        # same retain (setTestRemove with the "retain" flag set).
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        # Compare each node's view of the set against the local reference set.
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )  # 1-based node label for logs
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size reported by every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall step result: the retain itself plus both consistency checks.
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4251
4252 # Transactional maps
4253 main.step( "Partitioned Transactional maps put" )
4254 tMapValue = "Testing"
4255 numKeys = 100
4256 putResult = True
4257 node = main.activeNodes[0]
4258 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4259 if putResponses and len( putResponses ) == 100:
4260 for i in putResponses:
4261 if putResponses[ i ][ 'value' ] != tMapValue:
4262 putResult = False
4263 else:
4264 putResult = False
4265 if not putResult:
4266 main.log.debug( "Put response values: " + str( putResponses ) )
4267 utilities.assert_equals( expect=True,
4268 actual=putResult,
4269 onpass="Partitioned Transactional Map put successful",
4270 onfail="Partitioned Transactional Map put values are incorrect" )
4271
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        getCheck = True
        # For every key written by the put step ("Key1" .. "Key<numKeys>"),
        # read it back from all active nodes in parallel and verify that each
        # node returns the value that was stored.
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # Every node's response must equal the value written by the put step.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )