blob: c58ad410bcf2998c0f60351541a6a2385ce4cd1e [file] [log] [blame]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic scaling of the cluster size.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: The scaling case.
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAscaling:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAscaling.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Hall8f6d4622016-05-23 15:27:18 -0700133 port = main.params['server']['port']
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 main.scaling = main.params['scaling'].split( "," )
146 main.log.debug( main.scaling )
147 scale = main.scaling.pop(0)
148 main.log.debug( scale)
149 if "e" in scale:
150 equal = True
151 else:
152 equal = False
153 main.log.debug( equal)
154 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
155 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
156 utilities.assert_equals( expect=main.TRUE, actual=genResult,
157 onpass="New cluster metadata file generated",
158 onfail="Failled to generate new metadata file" )
159
160 cleanInstallResult = main.TRUE
161 gitPullResult = main.TRUE
162
163 main.step( "Starting Mininet" )
164 # scp topo file to mininet
165 # TODO: move to params?
166 topoName = "obelisk.py"
167 filePath = main.ONOSbench.home + "/tools/test/topos/"
168 main.ONOSbench.scp( main.Mininet1,
169 filePath + topoName,
170 main.Mininet1.home,
171 direction="to" )
172 mnResult = main.Mininet1.startNet( )
173 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
174 onpass="Mininet Started",
175 onfail="Error starting Mininet" )
176
177 main.step( "Git checkout and pull " + gitBranch )
178 if PULLCODE:
179 main.ONOSbench.gitCheckout( gitBranch )
180 gitPullResult = main.ONOSbench.gitPull()
181 # values of 1 or 3 are good
182 utilities.assert_lesser( expect=0, actual=gitPullResult,
183 onpass="Git pull successful",
184 onfail="Git pull failed" )
185 main.ONOSbench.getVersion( report=True )
186
187 main.step( "Using mvn clean install" )
188 cleanInstallResult = main.TRUE
189 if PULLCODE and gitPullResult == main.TRUE:
190 cleanInstallResult = main.ONOSbench.cleanInstall()
191 else:
192 main.log.warn( "Did not pull new code so skipping mvn " +
193 "clean install" )
194 utilities.assert_equals( expect=main.TRUE,
195 actual=cleanInstallResult,
196 onpass="MCI successful",
197 onfail="MCI failed" )
198 # GRAPHS
199 # NOTE: important params here:
200 # job = name of Jenkins job
201 # Plot Name = Plot-HA, only can be used if multiple plots
202 # index = The number of the graph under plot name
203 job = "HAscaling"
204 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700205 index = "1"
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700206 graphs = '<ac:structured-macro ac:name="html">\n'
207 graphs += '<ac:plain-text-body><![CDATA[\n'
208 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
209 '/plot/' + plotName + '/getPlot?index=' + index +\
210 '&width=500&height=300"' +\
211 'noborder="0" width="500" height="300" scrolling="yes" ' +\
212 'seamless="seamless"></iframe>\n'
213 graphs += ']]></ac:plain-text-body>\n'
214 graphs += '</ac:structured-macro>\n'
215 main.log.wiki(graphs)
216
217 main.step( "Copying backup config files" )
218 path = "~/onos/tools/package/bin/onos-service"
219 cp = main.ONOSbench.scp( main.ONOSbench,
220 path,
221 path + ".backup",
222 direction="to" )
223
224 utilities.assert_equals( expect=main.TRUE,
225 actual=cp,
226 onpass="Copy backup config file succeeded",
227 onfail="Copy backup config file failed" )
228 # we need to modify the onos-service file to use remote metadata file
229 # url for cluster metadata file
Jon Hall8f6d4622016-05-23 15:27:18 -0700230 iface = main.params['server'].get( 'interface' )
231 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700232 metaFile = "cluster.json"
233 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
234 main.log.warn( javaArgs )
235 main.log.warn( repr( javaArgs ) )
236 handle = main.ONOSbench.handle
237 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
238 main.log.warn( sed )
239 main.log.warn( repr( sed ) )
240 handle.sendline( sed )
Jon Hallbd60ea02016-08-23 10:03:59 -0700241 handle.expect( metaFile )
242 output = handle.before
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700243 handle.expect( "\$" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700244 output += handle.before
245 main.log.debug( repr( output ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700246
247 main.step( "Creating ONOS package" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700248 packageResult = main.ONOSbench.buckBuild()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700249 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
250 onpass="ONOS package successful",
251 onfail="ONOS package failed" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700252 if not packageResult:
253 main.cleanup()
254 main.exit()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700255
256 main.step( "Installing ONOS package" )
257 onosInstallResult = main.TRUE
258 for i in range( main.ONOSbench.maxNodes ):
259 node = main.nodes[i]
260 options = "-f"
261 if i >= main.numCtrls:
262 options = "-nf" # Don't start more than the current scale
263 tmpResult = main.ONOSbench.onosInstall( options=options,
264 node=node.ip_address )
265 onosInstallResult = onosInstallResult and tmpResult
266 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
267 onpass="ONOS install successful",
268 onfail="ONOS install failed" )
269
270 # Cleanup custom onos-service file
271 main.ONOSbench.scp( main.ONOSbench,
272 path + ".backup",
273 path,
274 direction="to" )
275
You Wangf5de25b2017-01-06 15:13:01 -0800276 main.step( "Set up ONOS secure SSH" )
277 secureSshResult = main.TRUE
Jon Hall168c1862017-01-31 17:35:34 -0800278 for i in range( main.numCtrls ):
279 node = main.nodes[i]
You Wangf5de25b2017-01-06 15:13:01 -0800280 secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
281 utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
282 onpass="Test step PASS",
283 onfail="Test step FAIL" )
284
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700285 main.step( "Checking if ONOS is up yet" )
286 for i in range( 2 ):
287 onosIsupResult = main.TRUE
288 for i in range( main.numCtrls ):
289 node = main.nodes[i]
290 started = main.ONOSbench.isup( node.ip_address )
291 if not started:
292 main.log.error( node.name + " hasn't started" )
293 onosIsupResult = onosIsupResult and started
294 if onosIsupResult == main.TRUE:
295 break
296 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
297 onpass="ONOS startup successful",
298 onfail="ONOS startup failed" )
299
Jon Hall6509dbf2016-06-21 17:01:17 -0700300 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700301 cliResults = main.TRUE
302 threads = []
303 for i in range( main.numCtrls ):
304 t = main.Thread( target=main.CLIs[i].startOnosCli,
305 name="startOnosCli-" + str( i ),
306 args=[main.nodes[i].ip_address] )
307 threads.append( t )
308 t.start()
309
310 for t in threads:
311 t.join()
312 cliResults = cliResults and t.result
313 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
314 onpass="ONOS cli startup successful",
315 onfail="ONOS cli startup failed" )
316
317 # Create a list of active nodes for use when some nodes are stopped
318 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
319
320 if main.params[ 'tcpdump' ].lower() == "true":
321 main.step( "Start Packet Capture MN" )
322 main.Mininet2.startTcpdump(
323 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
324 + "-MN.pcap",
325 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
326 port=main.params[ 'MNtcpdump' ][ 'port' ] )
327
328 main.step( "Checking ONOS nodes" )
329 nodeResults = utilities.retry( main.HA.nodesCheck,
330 False,
331 args=[main.activeNodes],
332 attempts=5 )
333 utilities.assert_equals( expect=True, actual=nodeResults,
334 onpass="Nodes check successful",
335 onfail="Nodes check NOT successful" )
336
337 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700338 for i in main.activeNodes:
339 cli = main.CLIs[i]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700340 main.log.debug( "{} components not ACTIVE: \n{}".format(
341 cli.name,
342 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700343 main.log.error( "Failed to start ONOS, stopping test" )
344 main.cleanup()
345 main.exit()
346
347 main.step( "Activate apps defined in the params file" )
348 # get data from the params
349 apps = main.params.get( 'apps' )
350 if apps:
351 apps = apps.split(',')
352 main.log.warn( apps )
353 activateResult = True
354 for app in apps:
355 main.CLIs[ 0 ].app( app, "Activate" )
356 # TODO: check this worked
357 time.sleep( 10 ) # wait for apps to activate
358 for app in apps:
359 state = main.CLIs[ 0 ].appStatus( app )
360 if state == "ACTIVE":
361 activateResult = activateResult and True
362 else:
363 main.log.error( "{} is in {} state".format( app, state ) )
364 activateResult = False
365 utilities.assert_equals( expect=True,
366 actual=activateResult,
367 onpass="Successfully activated apps",
368 onfail="Failed to activate apps" )
369 else:
370 main.log.warn( "No apps were specified to be loaded after startup" )
371
372 main.step( "Set ONOS configurations" )
373 config = main.params.get( 'ONOS_Configuration' )
374 if config:
375 main.log.debug( config )
376 checkResult = main.TRUE
377 for component in config:
378 for setting in config[component]:
379 value = config[component][setting]
380 check = main.CLIs[ 0 ].setCfg( component, setting, value )
381 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
382 checkResult = check and checkResult
383 utilities.assert_equals( expect=main.TRUE,
384 actual=checkResult,
385 onpass="Successfully set config",
386 onfail="Failed to set config" )
387 else:
388 main.log.warn( "No configurations were specified to be changed after startup" )
389
390 main.step( "App Ids check" )
391 appCheck = main.TRUE
392 threads = []
393 for i in main.activeNodes:
394 t = main.Thread( target=main.CLIs[i].appToIDCheck,
395 name="appToIDCheck-" + str( i ),
396 args=[] )
397 threads.append( t )
398 t.start()
399
400 for t in threads:
401 t.join()
402 appCheck = appCheck and t.result
403 if appCheck != main.TRUE:
404 node = main.activeNodes[0]
405 main.log.warn( main.CLIs[node].apps() )
406 main.log.warn( main.CLIs[node].appIDs() )
407 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
408 onpass="App Ids seem to be correct",
409 onfail="Something is wrong with app Ids" )
410
411 def CASE2( self, main ):
412 """
413 Assign devices to controllers
414 """
415 import re
416 assert main.numCtrls, "main.numCtrls not defined"
417 assert main, "main not defined"
418 assert utilities.assert_equals, "utilities.assert_equals not defined"
419 assert main.CLIs, "main.CLIs not defined"
420 assert main.nodes, "main.nodes not defined"
421
422 main.case( "Assigning devices to controllers" )
423 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
424 "and check that an ONOS node becomes the " +\
425 "master of the device."
426 main.step( "Assign switches to controllers" )
427
428 ipList = []
429 for i in range( main.ONOSbench.maxNodes ):
430 ipList.append( main.nodes[ i ].ip_address )
431 swList = []
432 for i in range( 1, 29 ):
433 swList.append( "s" + str( i ) )
434 main.Mininet1.assignSwController( sw=swList, ip=ipList )
435
436 mastershipCheck = main.TRUE
437 for i in range( 1, 29 ):
438 response = main.Mininet1.getSwController( "s" + str( i ) )
439 try:
440 main.log.info( str( response ) )
441 except Exception:
442 main.log.info( repr( response ) )
443 for node in main.nodes:
444 if re.search( "tcp:" + node.ip_address, response ):
445 mastershipCheck = mastershipCheck and main.TRUE
446 else:
447 main.log.error( "Error, node " + node.ip_address + " is " +
448 "not in the list of controllers s" +
449 str( i ) + " is connecting to." )
450 mastershipCheck = main.FALSE
451 utilities.assert_equals(
452 expect=main.TRUE,
453 actual=mastershipCheck,
454 onpass="Switch mastership assigned correctly",
455 onfail="Switches not assigned correctly to controllers" )
456
457 def CASE21( self, main ):
458 """
459 Assign mastership to controllers
460 """
461 import time
462 assert main.numCtrls, "main.numCtrls not defined"
463 assert main, "main not defined"
464 assert utilities.assert_equals, "utilities.assert_equals not defined"
465 assert main.CLIs, "main.CLIs not defined"
466 assert main.nodes, "main.nodes not defined"
467
468 main.case( "Assigning Controller roles for switches" )
469 main.caseExplanation = "Check that ONOS is connected to each " +\
470 "device. Then manually assign" +\
471 " mastership to specific ONOS nodes using" +\
472 " 'device-role'"
473 main.step( "Assign mastership of switches to specific controllers" )
474 # Manually assign mastership to the controller we want
475 roleCall = main.TRUE
476
477 ipList = [ ]
478 deviceList = []
479 onosCli = main.CLIs[ main.activeNodes[0] ]
480 try:
481 # Assign mastership to specific controllers. This assignment was
482 # determined for a 7 node cluser, but will work with any sized
483 # cluster
484 for i in range( 1, 29 ): # switches 1 through 28
485 # set up correct variables:
486 if i == 1:
487 c = 0
488 ip = main.nodes[ c ].ip_address # ONOS1
489 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
490 elif i == 2:
491 c = 1 % main.numCtrls
492 ip = main.nodes[ c ].ip_address # ONOS2
493 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
494 elif i == 3:
495 c = 1 % main.numCtrls
496 ip = main.nodes[ c ].ip_address # ONOS2
497 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
498 elif i == 4:
499 c = 3 % main.numCtrls
500 ip = main.nodes[ c ].ip_address # ONOS4
501 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
502 elif i == 5:
503 c = 2 % main.numCtrls
504 ip = main.nodes[ c ].ip_address # ONOS3
505 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
506 elif i == 6:
507 c = 2 % main.numCtrls
508 ip = main.nodes[ c ].ip_address # ONOS3
509 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
510 elif i == 7:
511 c = 5 % main.numCtrls
512 ip = main.nodes[ c ].ip_address # ONOS6
513 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
514 elif i >= 8 and i <= 17:
515 c = 4 % main.numCtrls
516 ip = main.nodes[ c ].ip_address # ONOS5
517 dpid = '3' + str( i ).zfill( 3 )
518 deviceId = onosCli.getDevice( dpid ).get( 'id' )
519 elif i >= 18 and i <= 27:
520 c = 6 % main.numCtrls
521 ip = main.nodes[ c ].ip_address # ONOS7
522 dpid = '6' + str( i ).zfill( 3 )
523 deviceId = onosCli.getDevice( dpid ).get( 'id' )
524 elif i == 28:
525 c = 0
526 ip = main.nodes[ c ].ip_address # ONOS1
527 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
528 else:
529 main.log.error( "You didn't write an else statement for " +
530 "switch s" + str( i ) )
531 roleCall = main.FALSE
532 # Assign switch
533 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
534 # TODO: make this controller dynamic
535 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
536 ipList.append( ip )
537 deviceList.append( deviceId )
538 except ( AttributeError, AssertionError ):
539 main.log.exception( "Something is wrong with ONOS device view" )
540 main.log.info( onosCli.devices() )
541 utilities.assert_equals(
542 expect=main.TRUE,
543 actual=roleCall,
544 onpass="Re-assigned switch mastership to designated controller",
545 onfail="Something wrong with deviceRole calls" )
546
547 main.step( "Check mastership was correctly assigned" )
548 roleCheck = main.TRUE
549 # NOTE: This is due to the fact that device mastership change is not
550 # atomic and is actually a multi step process
551 time.sleep( 5 )
552 for i in range( len( ipList ) ):
553 ip = ipList[i]
554 deviceId = deviceList[i]
555 # Check assignment
556 master = onosCli.getRole( deviceId ).get( 'master' )
557 if ip in master:
558 roleCheck = roleCheck and main.TRUE
559 else:
560 roleCheck = roleCheck and main.FALSE
561 main.log.error( "Error, controller " + ip + " is not" +
562 " master " + "of device " +
563 str( deviceId ) + ". Master is " +
564 repr( master ) + "." )
565 utilities.assert_equals(
566 expect=main.TRUE,
567 actual=roleCheck,
568 onpass="Switches were successfully reassigned to designated " +
569 "controller",
570 onfail="Switches were not successfully reassigned" )
571
572 def CASE3( self, main ):
573 """
574 Assign intents
575 """
576 import time
577 import json
578 assert main.numCtrls, "main.numCtrls not defined"
579 assert main, "main not defined"
580 assert utilities.assert_equals, "utilities.assert_equals not defined"
581 assert main.CLIs, "main.CLIs not defined"
582 assert main.nodes, "main.nodes not defined"
583 try:
584 labels
585 except NameError:
586 main.log.error( "labels not defined, setting to []" )
587 labels = []
588 try:
589 data
590 except NameError:
591 main.log.error( "data not defined, setting to []" )
592 data = []
593 # NOTE: we must reinstall intents until we have a persistant intent
594 # datastore!
595 main.case( "Adding host Intents" )
596 main.caseExplanation = "Discover hosts by using pingall then " +\
597 "assign predetermined host-to-host intents." +\
598 " After installation, check that the intent" +\
599 " is distributed to all nodes and the state" +\
600 " is INSTALLED"
601
602 # install onos-app-fwd
603 main.step( "Install reactive forwarding app" )
604 onosCli = main.CLIs[ main.activeNodes[0] ]
605 installResults = onosCli.activateApp( "org.onosproject.fwd" )
606 utilities.assert_equals( expect=main.TRUE, actual=installResults,
607 onpass="Install fwd successful",
608 onfail="Install fwd failed" )
609
610 main.step( "Check app ids" )
611 appCheck = main.TRUE
612 threads = []
613 for i in main.activeNodes:
614 t = main.Thread( target=main.CLIs[i].appToIDCheck,
615 name="appToIDCheck-" + str( i ),
616 args=[] )
617 threads.append( t )
618 t.start()
619
620 for t in threads:
621 t.join()
622 appCheck = appCheck and t.result
623 if appCheck != main.TRUE:
624 main.log.warn( onosCli.apps() )
625 main.log.warn( onosCli.appIDs() )
626 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
627 onpass="App Ids seem to be correct",
628 onfail="Something is wrong with app Ids" )
629
630 main.step( "Discovering Hosts( Via pingall for now )" )
631 # FIXME: Once we have a host discovery mechanism, use that instead
632 # REACTIVE FWD test
633 pingResult = main.FALSE
634 passMsg = "Reactive Pingall test passed"
635 time1 = time.time()
636 pingResult = main.Mininet1.pingall()
637 time2 = time.time()
638 if not pingResult:
639 main.log.warn("First pingall failed. Trying again...")
640 pingResult = main.Mininet1.pingall()
641 passMsg += " on the second try"
642 utilities.assert_equals(
643 expect=main.TRUE,
644 actual=pingResult,
645 onpass= passMsg,
646 onfail="Reactive Pingall failed, " +
647 "one or more ping pairs failed" )
648 main.log.info( "Time for pingall: %2f seconds" %
649 ( time2 - time1 ) )
650 # timeout for fwd flows
651 time.sleep( 11 )
652 # uninstall onos-app-fwd
653 main.step( "Uninstall reactive forwarding app" )
654 node = main.activeNodes[0]
655 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
656 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
657 onpass="Uninstall fwd successful",
658 onfail="Uninstall fwd failed" )
659
660 main.step( "Check app ids" )
661 threads = []
662 appCheck2 = main.TRUE
663 for i in main.activeNodes:
664 t = main.Thread( target=main.CLIs[i].appToIDCheck,
665 name="appToIDCheck-" + str( i ),
666 args=[] )
667 threads.append( t )
668 t.start()
669
670 for t in threads:
671 t.join()
672 appCheck2 = appCheck2 and t.result
673 if appCheck2 != main.TRUE:
674 node = main.activeNodes[0]
675 main.log.warn( main.CLIs[node].apps() )
676 main.log.warn( main.CLIs[node].appIDs() )
677 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
678 onpass="App Ids seem to be correct",
679 onfail="Something is wrong with app Ids" )
680
681 main.step( "Add host intents via cli" )
682 intentIds = []
683 # TODO: move the host numbers to params
684 # Maybe look at all the paths we ping?
685 intentAddResult = True
686 hostResult = main.TRUE
687 for i in range( 8, 18 ):
688 main.log.info( "Adding host intent between h" + str( i ) +
689 " and h" + str( i + 10 ) )
690 host1 = "00:00:00:00:00:" + \
691 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
692 host2 = "00:00:00:00:00:" + \
693 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
694 # NOTE: getHost can return None
695 host1Dict = onosCli.getHost( host1 )
696 host2Dict = onosCli.getHost( host2 )
697 host1Id = None
698 host2Id = None
699 if host1Dict and host2Dict:
700 host1Id = host1Dict.get( 'id', None )
701 host2Id = host2Dict.get( 'id', None )
702 if host1Id and host2Id:
703 nodeNum = ( i % len( main.activeNodes ) )
704 node = main.activeNodes[nodeNum]
705 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
706 if tmpId:
707 main.log.info( "Added intent with id: " + tmpId )
708 intentIds.append( tmpId )
709 else:
710 main.log.error( "addHostIntent returned: " +
711 repr( tmpId ) )
712 else:
713 main.log.error( "Error, getHost() failed for h" + str( i ) +
714 " and/or h" + str( i + 10 ) )
715 node = main.activeNodes[0]
716 hosts = main.CLIs[node].hosts()
717 main.log.warn( "Hosts output: " )
718 try:
719 main.log.warn( json.dumps( json.loads( hosts ),
720 sort_keys=True,
721 indent=4,
722 separators=( ',', ': ' ) ) )
723 except ( ValueError, TypeError ):
724 main.log.warn( repr( hosts ) )
725 hostResult = main.FALSE
726 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
727 onpass="Found a host id for each host",
728 onfail="Error looking up host ids" )
729
730 intentStart = time.time()
731 onosIds = onosCli.getAllIntentsId()
732 main.log.info( "Submitted intents: " + str( intentIds ) )
733 main.log.info( "Intents in ONOS: " + str( onosIds ) )
734 for intent in intentIds:
735 if intent in onosIds:
736 pass # intent submitted is in onos
737 else:
738 intentAddResult = False
739 if intentAddResult:
740 intentStop = time.time()
741 else:
742 intentStop = None
743 # Print the intent states
744 intents = onosCli.intents()
745 intentStates = []
746 installedCheck = True
747 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
748 count = 0
749 try:
750 for intent in json.loads( intents ):
751 state = intent.get( 'state', None )
752 if "INSTALLED" not in state:
753 installedCheck = False
754 intentId = intent.get( 'id', None )
755 intentStates.append( ( intentId, state ) )
756 except ( ValueError, TypeError ):
757 main.log.exception( "Error parsing intents" )
758 # add submitted intents not in the store
759 tmplist = [ i for i, s in intentStates ]
760 missingIntents = False
761 for i in intentIds:
762 if i not in tmplist:
763 intentStates.append( ( i, " - " ) )
764 missingIntents = True
765 intentStates.sort()
766 for i, s in intentStates:
767 count += 1
768 main.log.info( "%-6s%-15s%-15s" %
769 ( str( count ), str( i ), str( s ) ) )
770 leaders = onosCli.leaders()
771 try:
772 missing = False
773 if leaders:
774 parsedLeaders = json.loads( leaders )
775 main.log.warn( json.dumps( parsedLeaders,
776 sort_keys=True,
777 indent=4,
778 separators=( ',', ': ' ) ) )
779 # check for all intent partitions
780 topics = []
781 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700782 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700783 main.log.debug( topics )
784 ONOStopics = [ j['topic'] for j in parsedLeaders ]
785 for topic in topics:
786 if topic not in ONOStopics:
787 main.log.error( "Error: " + topic +
788 " not in leaders" )
789 missing = True
790 else:
791 main.log.error( "leaders() returned None" )
792 except ( ValueError, TypeError ):
793 main.log.exception( "Error parsing leaders" )
794 main.log.error( repr( leaders ) )
795 # Check all nodes
796 if missing:
797 for i in main.activeNodes:
798 response = main.CLIs[i].leaders( jsonFormat=False)
799 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
800 str( response ) )
801
802 partitions = onosCli.partitions()
803 try:
804 if partitions :
805 parsedPartitions = json.loads( partitions )
806 main.log.warn( json.dumps( parsedPartitions,
807 sort_keys=True,
808 indent=4,
809 separators=( ',', ': ' ) ) )
810 # TODO check for a leader in all paritions
811 # TODO check for consistency among nodes
812 else:
813 main.log.error( "partitions() returned None" )
814 except ( ValueError, TypeError ):
815 main.log.exception( "Error parsing partitions" )
816 main.log.error( repr( partitions ) )
817 pendingMap = onosCli.pendingMap()
818 try:
819 if pendingMap :
820 parsedPending = json.loads( pendingMap )
821 main.log.warn( json.dumps( parsedPending,
822 sort_keys=True,
823 indent=4,
824 separators=( ',', ': ' ) ) )
825 # TODO check something here?
826 else:
827 main.log.error( "pendingMap() returned None" )
828 except ( ValueError, TypeError ):
829 main.log.exception( "Error parsing pending map" )
830 main.log.error( repr( pendingMap ) )
831
832 intentAddResult = bool( intentAddResult and not missingIntents and
833 installedCheck )
834 if not intentAddResult:
835 main.log.error( "Error in pushing host intents to ONOS" )
836
837 main.step( "Intent Anti-Entropy dispersion" )
838 for j in range(100):
839 correct = True
840 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
841 for i in main.activeNodes:
842 onosIds = []
843 ids = main.CLIs[i].getAllIntentsId()
844 onosIds.append( ids )
845 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
846 str( sorted( onosIds ) ) )
847 if sorted( ids ) != sorted( intentIds ):
848 main.log.warn( "Set of intent IDs doesn't match" )
849 correct = False
850 break
851 else:
852 intents = json.loads( main.CLIs[i].intents() )
853 for intent in intents:
854 if intent[ 'state' ] != "INSTALLED":
855 main.log.warn( "Intent " + intent[ 'id' ] +
856 " is " + intent[ 'state' ] )
857 correct = False
858 break
859 if correct:
860 break
861 else:
862 time.sleep(1)
863 if not intentStop:
864 intentStop = time.time()
865 global gossipTime
866 gossipTime = intentStop - intentStart
867 main.log.info( "It took about " + str( gossipTime ) +
868 " seconds for all intents to appear in each node" )
869 append = False
870 title = "Gossip Intents"
871 count = 1
872 while append is False:
873 curTitle = title + str( count )
874 if curTitle not in labels:
875 labels.append( curTitle )
876 data.append( str( gossipTime ) )
877 append = True
878 else:
879 count += 1
880 gossipPeriod = int( main.params['timers']['gossip'] )
881 maxGossipTime = gossipPeriod * len( main.activeNodes )
882 utilities.assert_greater_equals(
883 expect=maxGossipTime, actual=gossipTime,
884 onpass="ECM anti-entropy for intents worked within " +
885 "expected time",
886 onfail="Intent ECM anti-entropy took too long. " +
887 "Expected time:{}, Actual time:{}".format( maxGossipTime,
888 gossipTime ) )
889 if gossipTime <= maxGossipTime:
890 intentAddResult = True
891
892 if not intentAddResult or "key" in pendingMap:
893 import time
894 installedCheck = True
895 main.log.info( "Sleeping 60 seconds to see if intents are found" )
896 time.sleep( 60 )
897 onosIds = onosCli.getAllIntentsId()
898 main.log.info( "Submitted intents: " + str( intentIds ) )
899 main.log.info( "Intents in ONOS: " + str( onosIds ) )
900 # Print the intent states
901 intents = onosCli.intents()
902 intentStates = []
903 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
904 count = 0
905 try:
906 for intent in json.loads( intents ):
907 # Iter through intents of a node
908 state = intent.get( 'state', None )
909 if "INSTALLED" not in state:
910 installedCheck = False
911 intentId = intent.get( 'id', None )
912 intentStates.append( ( intentId, state ) )
913 except ( ValueError, TypeError ):
914 main.log.exception( "Error parsing intents" )
915 # add submitted intents not in the store
916 tmplist = [ i for i, s in intentStates ]
917 for i in intentIds:
918 if i not in tmplist:
919 intentStates.append( ( i, " - " ) )
920 intentStates.sort()
921 for i, s in intentStates:
922 count += 1
923 main.log.info( "%-6s%-15s%-15s" %
924 ( str( count ), str( i ), str( s ) ) )
925 leaders = onosCli.leaders()
926 try:
927 missing = False
928 if leaders:
929 parsedLeaders = json.loads( leaders )
930 main.log.warn( json.dumps( parsedLeaders,
931 sort_keys=True,
932 indent=4,
933 separators=( ',', ': ' ) ) )
934 # check for all intent partitions
935 # check for election
936 topics = []
937 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700938 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700939 # FIXME: this should only be after we start the app
940 topics.append( "org.onosproject.election" )
941 main.log.debug( topics )
942 ONOStopics = [ j['topic'] for j in parsedLeaders ]
943 for topic in topics:
944 if topic not in ONOStopics:
945 main.log.error( "Error: " + topic +
946 " not in leaders" )
947 missing = True
948 else:
949 main.log.error( "leaders() returned None" )
950 except ( ValueError, TypeError ):
951 main.log.exception( "Error parsing leaders" )
952 main.log.error( repr( leaders ) )
953 # Check all nodes
954 if missing:
955 for i in main.activeNodes:
956 node = main.CLIs[i]
957 response = node.leaders( jsonFormat=False)
958 main.log.warn( str( node.name ) + " leaders output: \n" +
959 str( response ) )
960
961 partitions = onosCli.partitions()
962 try:
963 if partitions :
964 parsedPartitions = json.loads( partitions )
965 main.log.warn( json.dumps( parsedPartitions,
966 sort_keys=True,
967 indent=4,
968 separators=( ',', ': ' ) ) )
969 # TODO check for a leader in all paritions
970 # TODO check for consistency among nodes
971 else:
972 main.log.error( "partitions() returned None" )
973 except ( ValueError, TypeError ):
974 main.log.exception( "Error parsing partitions" )
975 main.log.error( repr( partitions ) )
976 pendingMap = onosCli.pendingMap()
977 try:
978 if pendingMap :
979 parsedPending = json.loads( pendingMap )
980 main.log.warn( json.dumps( parsedPending,
981 sort_keys=True,
982 indent=4,
983 separators=( ',', ': ' ) ) )
984 # TODO check something here?
985 else:
986 main.log.error( "pendingMap() returned None" )
987 except ( ValueError, TypeError ):
988 main.log.exception( "Error parsing pending map" )
989 main.log.error( repr( pendingMap ) )
990
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Waits (up to ~40s) for all intents to reach the INSTALLED state,
        then pings h8-h17 against h18-h27 through those intents. On any
        failure it dumps intents, topic leadership, partitions and the
        pending map from the first active node for debugging, waits 60
        seconds, and retries the state check and the pings once.

        Reads from the test framework: main.CLIs, main.activeNodes,
        main.Mininet1, main.params. Reports results via
        utilities.assert_equals; returns nothing.
        """
        import json
        import time
        # Sanity-check that the framework populated everything this case uses.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All CLI queries in this case go through the first active node.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll once per second, up to 40 times, until every intent reports
        # an INSTALLED state.
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # Unparsable CLI output; log it and fall through with
                # whatever states were collected so far.
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairing convention: h<i> talks to h<i+10>, i.e. h8<->h18 ...
        # h17<->h27.
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Pings failed: dump the intent table from ONOS1 to help debug.
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # NOTE(review): assumes exactly 14 work partitions — confirm
                # against the cluster's partition configuration.
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Dump raw (non-JSON) leader output from every active node.
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If some intents were still not INSTALLED, give the cluster one
        # more minute to converge, re-dump all state, and ping again.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1272
    def CASE5( self, main ):
        """
        Reading state of ONOS.

        Snapshots cluster state from every active node — switch mastership,
        intents, flows, and topology (devices, hosts, ports, links, SCC
        clusters) — checks that each node's view is internally consistent
        with the others, and compares the ONOS topology against Mininet's.
        Consistent snapshots are saved into the module-level globals
        ``mastershipState``, ``intentState``, ``flowState`` and ``flows``
        so later cases (e.g. post-failure comparisons) can diff against
        them. Also starts ten long-running background pings via Mininet2.

        Reads from the test framework: main.CLIs, main.activeNodes,
        main.Mininet1, main.Mininet2, main.params. Reports results via
        utilities.assert_equals; returns nothing.
        """
        import json
        import time
        # Sanity-check that the framework populated everything this case uses.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        # Saved for later cases to compare against; '[]' until a consistent
        # snapshot is captured below.
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        # One query thread per active node, joined below.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        # Results land in ONOSMastership in the same order as
        # main.activeNodes (threads are joined in creation order).
        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            # Node label for logs: ONOS node numbers are 1-based.
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Consistent means every node returned exactly the same roles string.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Inconsistent: pretty-print every node's view for debugging.
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Readable and consistent: save the snapshot for later cases.
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() on a string sorts its characters — this
        # compares the raw JSON strings as character multisets, so it
        # tolerates re-ordered output but not differing values; confirm
        # this is the intended comparison.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            # Dump the last node's intents in full, then each node that
            # differs from it.
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Save the consistent snapshot for later comparison.
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Flow IDs differ per node, so only the counts are compared here.
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        # Snapshot the OpenFlow tables of switches s1-s28 for later diffing.
        global flows
        flows = []
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Ten long-lived background pings (500s each) between the
        # source/target pairs configured in main.params['PING'].
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Gather each topology view (devices, hosts, ports, links,
        # clusters) from every active node; each result list is ordered
        # the same as main.activeNodes.
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                # hosts is the only view parsed to JSON here; the others
                # stay as raw strings until they are compared below.
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    # A host with no 'ipAddresses' entry (or an empty one)
                    # is treated as an error.
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): this loop iterates node *values* from activeNodes
        # but uses them as positional indices into devices/ports/links/
        # hosts (which are ordered lists) and into activeNodes itself.
        # That is only correct while activeNodes == [0, 1, ..., n-1];
        # confirm before reusing with a sparse activeNodes list (the
        # earlier consistency loops use range(len(...)) instead).
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1829
1830 def CASE6( self, main ):
1831 """
1832 The Scaling case.
1833 """
1834 import time
1835 import re
1836 assert main.numCtrls, "main.numCtrls not defined"
1837 assert main, "main not defined"
1838 assert utilities.assert_equals, "utilities.assert_equals not defined"
1839 assert main.CLIs, "main.CLIs not defined"
1840 assert main.nodes, "main.nodes not defined"
1841 try:
1842 labels
1843 except NameError:
1844 main.log.error( "labels not defined, setting to []" )
1845 global labels
1846 labels = []
1847 try:
1848 data
1849 except NameError:
1850 main.log.error( "data not defined, setting to []" )
1851 global data
1852 data = []
1853
Jon Hall69b2b982016-05-11 12:04:59 -07001854 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001855
1856 main.step( "Checking ONOS Logs for errors" )
1857 for i in main.activeNodes:
1858 node = main.nodes[i]
1859 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1860 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1861
1862 """
1863 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1864 modify cluster.json file appropriately
1865 install/deactivate node as needed
1866 """
1867
1868 try:
1869 prevNodes = main.activeNodes
1870 scale = main.scaling.pop(0)
1871 if "e" in scale:
1872 equal = True
1873 else:
1874 equal = False
1875 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1876 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1877 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1878 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1879 onpass="New cluster metadata file generated",
1880 onfail="Failled to generate new metadata file" )
1881 time.sleep( 5 ) # Give time for nodes to read new file
1882 except IndexError:
1883 main.cleanup()
1884 main.exit()
1885
1886 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1887 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1888
1889 main.step( "Start new nodes" ) # OR stop old nodes?
1890 started = main.TRUE
1891 for i in newNodes:
1892 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1893 utilities.assert_equals( expect=main.TRUE, actual=started,
1894 onpass="ONOS started",
1895 onfail="ONOS start NOT successful" )
1896
1897 main.step( "Checking if ONOS is up yet" )
1898 for i in range( 2 ):
1899 onosIsupResult = main.TRUE
1900 for i in main.activeNodes:
1901 node = main.nodes[i]
Jon Hall168c1862017-01-31 17:35:34 -08001902 main.ONOSbench.onosSecureSSH( node=node.ip_address )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001903 started = main.ONOSbench.isup( node.ip_address )
1904 if not started:
1905 main.log.error( node.name + " didn't start!" )
1906 onosIsupResult = onosIsupResult and started
1907 if onosIsupResult == main.TRUE:
1908 break
1909 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1910 onpass="ONOS started",
1911 onfail="ONOS start NOT successful" )
1912
Jon Hall6509dbf2016-06-21 17:01:17 -07001913 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001914 cliResults = main.TRUE
1915 threads = []
1916 for i in main.activeNodes:
1917 t = main.Thread( target=main.CLIs[i].startOnosCli,
1918 name="startOnosCli-" + str( i ),
1919 args=[main.nodes[i].ip_address] )
1920 threads.append( t )
1921 t.start()
1922
1923 for t in threads:
1924 t.join()
1925 cliResults = cliResults and t.result
1926 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1927 onpass="ONOS cli started",
1928 onfail="ONOS clis did not start" )
1929
1930 main.step( "Checking ONOS nodes" )
1931 nodeResults = utilities.retry( main.HA.nodesCheck,
1932 False,
1933 args=[main.activeNodes],
1934 attempts=5 )
1935 utilities.assert_equals( expect=True, actual=nodeResults,
1936 onpass="Nodes check successful",
1937 onfail="Nodes check NOT successful" )
1938
1939 for i in range( 10 ):
1940 ready = True
1941 for i in main.activeNodes:
1942 cli = main.CLIs[i]
1943 output = cli.summary()
1944 if not output:
1945 ready = False
1946 if ready:
1947 break
1948 time.sleep( 30 )
1949 utilities.assert_equals( expect=True, actual=ready,
1950 onpass="ONOS summary command succeded",
1951 onfail="ONOS summary command failed" )
1952 if not ready:
1953 main.cleanup()
1954 main.exit()
1955
1956 # Rerun for election on new nodes
1957 runResults = main.TRUE
1958 for i in main.activeNodes:
1959 cli = main.CLIs[i]
1960 run = cli.electionTestRun()
1961 if run != main.TRUE:
1962 main.log.error( "Error running for election on " + cli.name )
1963 runResults = runResults and run
1964 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1965 onpass="Reran for election",
1966 onfail="Failed to rerun for election" )
1967
1968 # TODO: Make this configurable
1969 time.sleep( 60 )
1970 for node in main.activeNodes:
1971 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1972 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1973 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1974 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1975 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1976
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that the control plane survived the scale event: every
        device has a master, mastership and intents are consistent across
        the active nodes, intents and OF flow tables match the state saved
        before scaling, and leadership election still works.

        NOTE(review): relies on cross-case globals set by earlier cases
        (`intentState` from CASE5, `flows` for the flow-table comparison) —
        those cases must have run first.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Query device roles from every active node in parallel
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should report the same mastership view
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's view to help debug the inconsistency
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id, one column per node's state
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Summarize the intent-state distribution seen by each node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same count; fall back to element-wise membership check
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # `flows` is the per-switch flow snapshot saved before scaling
        # (set in an earlier case) — TODO confirm it is always defined here
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # Every node must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2285
    def CASE8( self, main ):
        """
        Compare topo.

        Repeatedly pulls devices/hosts/ports/links/clusters from every
        active ONOS node and compares them to Mininet's view until they
        match (or retries are exhausted), then runs consistency checks on
        hosts, host attachment points, and dataplane clusters. Exits the
        test if the topologies never converge.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed there is nothing to compare; retry
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        # NOTE(review): currentDevicesResult may be stale or
                        # unbound here if parsing fails — verify upstream
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # An empty host list is acceptable at this point
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Overall verdict: every per-category result must have passed
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump which karaf components are not ACTIVE for debugging
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            main.cleanup()
            main.exit()
2708
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002709 def CASE9( self, main ):
2710 """
2711 Link s3-s28 down
2712 """
2713 import time
2714 assert main.numCtrls, "main.numCtrls not defined"
2715 assert main, "main not defined"
2716 assert utilities.assert_equals, "utilities.assert_equals not defined"
2717 assert main.CLIs, "main.CLIs not defined"
2718 assert main.nodes, "main.nodes not defined"
2719 # NOTE: You should probably run a topology check after this
2720
2721 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2722
2723 description = "Turn off a link to ensure that Link Discovery " +\
2724 "is working properly"
2725 main.case( description )
2726
2727 main.step( "Kill Link between s3 and s28" )
2728 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2729 main.log.info( "Waiting " + str( linkSleep ) +
2730 " seconds for link down to be discovered" )
2731 time.sleep( linkSleep )
2732 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2733 onpass="Link down successful",
2734 onfail="Failed to bring link down" )
2735 # TODO do some sort of check here
2736
2737 def CASE10( self, main ):
2738 """
2739 Link s3-s28 up
2740 """
2741 import time
2742 assert main.numCtrls, "main.numCtrls not defined"
2743 assert main, "main not defined"
2744 assert utilities.assert_equals, "utilities.assert_equals not defined"
2745 assert main.CLIs, "main.CLIs not defined"
2746 assert main.nodes, "main.nodes not defined"
2747 # NOTE: You should probably run a topology check after this
2748
2749 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2750
2751 description = "Restore a link to ensure that Link Discovery is " + \
2752 "working properly"
2753 main.case( description )
2754
2755 main.step( "Bring link between s3 and s28 back up" )
2756 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2757 main.log.info( "Waiting " + str( linkSleep ) +
2758 " seconds for link up to be discovered" )
2759 time.sleep( linkSleep )
2760 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2761 onpass="Link up successful",
2762 onfail="Failed to bring link up" )
2763 # TODO do some sort of check here
2764
2765 def CASE11( self, main ):
2766 """
2767 Switch Down
2768 """
2769 # NOTE: You should probably run a topology check after this
2770 import time
2771 assert main.numCtrls, "main.numCtrls not defined"
2772 assert main, "main not defined"
2773 assert utilities.assert_equals, "utilities.assert_equals not defined"
2774 assert main.CLIs, "main.CLIs not defined"
2775 assert main.nodes, "main.nodes not defined"
2776
2777 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2778
2779 description = "Killing a switch to ensure it is discovered correctly"
2780 onosCli = main.CLIs[ main.activeNodes[0] ]
2781 main.case( description )
2782 switch = main.params[ 'kill' ][ 'switch' ]
2783 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2784
2785 # TODO: Make this switch parameterizable
2786 main.step( "Kill " + switch )
2787 main.log.info( "Deleting " + switch )
2788 main.Mininet1.delSwitch( switch )
2789 main.log.info( "Waiting " + str( switchSleep ) +
2790 " seconds for switch down to be discovered" )
2791 time.sleep( switchSleep )
2792 device = onosCli.getDevice( dpid=switchDPID )
2793 # Peek at the deleted switch
2794 main.log.warn( str( device ) )
2795 result = main.FALSE
2796 if device and device[ 'available' ] is False:
2797 result = main.TRUE
2798 utilities.assert_equals( expect=main.TRUE, actual=result,
2799 onpass="Kill switch successful",
2800 onfail="Failed to kill switch?" )
2801
2802 def CASE12( self, main ):
2803 """
2804 Switch Up
2805 """
2806 # NOTE: You should probably run a topology check after this
2807 import time
2808 assert main.numCtrls, "main.numCtrls not defined"
2809 assert main, "main not defined"
2810 assert utilities.assert_equals, "utilities.assert_equals not defined"
2811 assert main.CLIs, "main.CLIs not defined"
2812 assert main.nodes, "main.nodes not defined"
2813
2814 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2815 switch = main.params[ 'kill' ][ 'switch' ]
2816 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2817 links = main.params[ 'kill' ][ 'links' ].split()
2818 onosCli = main.CLIs[ main.activeNodes[0] ]
2819 description = "Adding a switch to ensure it is discovered correctly"
2820 main.case( description )
2821
2822 main.step( "Add back " + switch )
2823 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2824 for peer in links:
2825 main.Mininet1.addLink( switch, peer )
2826 ipList = [ node.ip_address for node in main.nodes ]
2827 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2828 main.log.info( "Waiting " + str( switchSleep ) +
2829 " seconds for switch up to be discovered" )
2830 time.sleep( switchSleep )
2831 device = onosCli.getDevice( dpid=switchDPID )
2832 # Peek at the deleted switch
2833 main.log.warn( str( device ) )
2834 result = main.FALSE
2835 if device and device[ 'available' ]:
2836 result = main.TRUE
2837 utilities.assert_equals( expect=main.TRUE, actual=result,
2838 onpass="add switch successful",
2839 onfail="Failed to add switch?" )
2840
2841 def CASE13( self, main ):
2842 """
2843 Clean up
2844 """
2845 assert main.numCtrls, "main.numCtrls not defined"
2846 assert main, "main not defined"
2847 assert utilities.assert_equals, "utilities.assert_equals not defined"
2848 assert main.CLIs, "main.CLIs not defined"
2849 assert main.nodes, "main.nodes not defined"
2850
2851 main.case( "Test Cleanup" )
2852 main.step( "Killing tcpdumps" )
2853 main.Mininet2.stopTcpdump()
2854
2855 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2856 main.step( "Copying MN pcap and ONOS log files to test station" )
2857 # NOTE: MN Pcap file is being saved to logdir.
2858 # We scp this file as MN and TestON aren't necessarily the same vm
2859
2860 # FIXME: To be replaced with a Jenkin's post script
2861 # TODO: Load these from params
2862 # NOTE: must end in /
2863 logFolder = "/opt/onos/log/"
2864 logFiles = [ "karaf.log", "karaf.log.1" ]
2865 # NOTE: must end in /
2866 for f in logFiles:
2867 for node in main.nodes:
2868 dstName = main.logdir + "/" + node.name + "-" + f
2869 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2870 logFolder + f, dstName )
2871 # std*.log's
2872 # NOTE: must end in /
2873 logFolder = "/opt/onos/var/"
2874 logFiles = [ "stderr.log", "stdout.log" ]
2875 # NOTE: must end in /
2876 for f in logFiles:
2877 for node in main.nodes:
2878 dstName = main.logdir + "/" + node.name + "-" + f
2879 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2880 logFolder + f, dstName )
2881 else:
2882 main.log.debug( "skipping saving log files" )
2883
2884 main.step( "Stopping Mininet" )
2885 mnResult = main.Mininet1.stopNet()
2886 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2887 onpass="Mininet stopped",
2888 onfail="MN cleanup NOT successful" )
2889
2890 main.step( "Checking ONOS Logs for errors" )
2891 for node in main.nodes:
2892 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2893 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2894
2895 try:
2896 timerLog = open( main.logdir + "/Timers.csv", 'w')
2897 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2898 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2899 timerLog.close()
2900 except NameError, e:
2901 main.log.exception(e)
2902
2903 main.step( "Stopping webserver" )
2904 status = main.Server.stop( )
2905 utilities.assert_equals( expect=main.TRUE, actual=status,
2906 onpass="Stop Server",
2907 onfail="Failled to stop SimpleHTTPServer" )
2908 del main.Server
2909
2910 def CASE14( self, main ):
2911 """
2912 start election app on all onos nodes
2913 """
2914 import time
2915 assert main.numCtrls, "main.numCtrls not defined"
2916 assert main, "main not defined"
2917 assert utilities.assert_equals, "utilities.assert_equals not defined"
2918 assert main.CLIs, "main.CLIs not defined"
2919 assert main.nodes, "main.nodes not defined"
2920
2921 main.case("Start Leadership Election app")
2922 main.step( "Install leadership election app" )
2923 onosCli = main.CLIs[ main.activeNodes[0] ]
2924 appResult = onosCli.activateApp( "org.onosproject.election" )
2925 utilities.assert_equals(
2926 expect=main.TRUE,
2927 actual=appResult,
2928 onpass="Election app installed",
2929 onfail="Something went wrong with installing Leadership election" )
2930
2931 main.step( "Run for election on each node" )
2932 for i in main.activeNodes:
2933 main.CLIs[i].electionTestRun()
2934 time.sleep(5)
2935 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2936 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2937 utilities.assert_equals(
2938 expect=True,
2939 actual=sameResult,
2940 onpass="All nodes see the same leaderboards",
2941 onfail="Inconsistent leaderboards" )
2942
2943 if sameResult:
2944 leader = leaders[ 0 ][ 0 ]
2945 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2946 correctLeader = True
2947 else:
2948 correctLeader = False
2949 main.step( "First node was elected leader" )
2950 utilities.assert_equals(
2951 expect=True,
2952 actual=correctLeader,
2953 onpass="Correct leader was elected",
2954 onfail="Incorrect leader" )
2955
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before withdrawal
        newLeaders = []  # list of lists of each node's candidates after withdrawal
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            # With a single node, withdrawing leaves nobody to take over
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app the rest of the case is meaningless
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; the first entry is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means no matching node found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means the topic currently has no leader
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries: cannot predict the successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3129
3130 def CASE16( self, main ):
3131 """
3132 Install Distributed Primitives app
3133 """
3134 import time
3135 assert main.numCtrls, "main.numCtrls not defined"
3136 assert main, "main not defined"
3137 assert utilities.assert_equals, "utilities.assert_equals not defined"
3138 assert main.CLIs, "main.CLIs not defined"
3139 assert main.nodes, "main.nodes not defined"
3140
3141 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003142 main.pCounterName = "TestON-Partitions"
3143 main.pCounterValue = 0
3144 main.onosSet = set([])
3145 main.onosSetName = "TestON-set"
Jon Hall9ebd1bd2016-04-19 01:37:17 -07003146
3147 description = "Install Primitives app"
3148 main.case( description )
3149 main.step( "Install Primitives app" )
3150 appName = "org.onosproject.distributedprimitives"
3151 node = main.activeNodes[0]
3152 appResults = main.CLIs[node].activateApp( appName )
3153 utilities.assert_equals( expect=main.TRUE,
3154 actual=appResults,
3155 onpass="Primitives app activated",
3156 onfail="Primitives app not activated" )
3157 time.sleep( 5 ) # To allow all nodes to activate
3158
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives
        """
        # Delegates to the shared HA helper so all HA test variants run
        # the same distributed-primitives checks.
        main.HA.CASE17( main )