blob: 8f2b58cba339e83d38f2939320c1a0d3503cff24 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    dynamic swapping of cluster nodes.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: Swap nodes
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
25
26
27class HAswapNodes:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAswapNodes.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Hall8f6d4622016-05-23 15:27:18 -0700133 port = main.params['server']['port']
Jon Hall69b2b982016-05-11 12:04:59 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 if main.numCtrls >= 5:
146 main.numCtrls -= 2
147 else:
148 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
149 genResult = main.Server.generateFile( main.numCtrls )
150 utilities.assert_equals( expect=main.TRUE, actual=genResult,
151 onpass="New cluster metadata file generated",
152 onfail="Failled to generate new metadata file" )
153
154 cleanInstallResult = main.TRUE
155 gitPullResult = main.TRUE
156
157 main.step( "Starting Mininet" )
158 # scp topo file to mininet
159 # TODO: move to params?
160 topoName = "obelisk.py"
161 filePath = main.ONOSbench.home + "/tools/test/topos/"
162 main.ONOSbench.scp( main.Mininet1,
163 filePath + topoName,
164 main.Mininet1.home,
165 direction="to" )
166 mnResult = main.Mininet1.startNet( )
167 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
168 onpass="Mininet Started",
169 onfail="Error starting Mininet" )
170
171 main.step( "Git checkout and pull " + gitBranch )
172 if PULLCODE:
173 main.ONOSbench.gitCheckout( gitBranch )
174 gitPullResult = main.ONOSbench.gitPull()
175 # values of 1 or 3 are good
176 utilities.assert_lesser( expect=0, actual=gitPullResult,
177 onpass="Git pull successful",
178 onfail="Git pull failed" )
179 main.ONOSbench.getVersion( report=True )
180
181 main.step( "Using mvn clean install" )
182 cleanInstallResult = main.TRUE
183 if PULLCODE and gitPullResult == main.TRUE:
184 cleanInstallResult = main.ONOSbench.cleanInstall()
185 else:
186 main.log.warn( "Did not pull new code so skipping mvn " +
187 "clean install" )
188 utilities.assert_equals( expect=main.TRUE,
189 actual=cleanInstallResult,
190 onpass="MCI successful",
191 onfail="MCI failed" )
192 # GRAPHS
193 # NOTE: important params here:
194 # job = name of Jenkins job
195 # Plot Name = Plot-HA, only can be used if multiple plots
196 # index = The number of the graph under plot name
197 job = "HAswapNodes"
198 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700199 index = "2"
Jon Hall69b2b982016-05-11 12:04:59 -0700200 graphs = '<ac:structured-macro ac:name="html">\n'
201 graphs += '<ac:plain-text-body><![CDATA[\n'
202 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
203 '/plot/' + plotName + '/getPlot?index=' + index +\
204 '&width=500&height=300"' +\
205 'noborder="0" width="500" height="300" scrolling="yes" ' +\
206 'seamless="seamless"></iframe>\n'
207 graphs += ']]></ac:plain-text-body>\n'
208 graphs += '</ac:structured-macro>\n'
209 main.log.wiki(graphs)
210
211 main.step( "Copying backup config files" )
212 path = "~/onos/tools/package/bin/onos-service"
213 cp = main.ONOSbench.scp( main.ONOSbench,
214 path,
215 path + ".backup",
216 direction="to" )
217
218 utilities.assert_equals( expect=main.TRUE,
219 actual=cp,
220 onpass="Copy backup config file succeeded",
221 onfail="Copy backup config file failed" )
222 # we need to modify the onos-service file to use remote metadata file
223 # url for cluster metadata file
Jon Hall8f6d4622016-05-23 15:27:18 -0700224 iface = main.params['server'].get( 'interface' )
225 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall69b2b982016-05-11 12:04:59 -0700226 metaFile = "cluster.json"
227 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
228 main.log.warn( javaArgs )
229 main.log.warn( repr( javaArgs ) )
230 handle = main.ONOSbench.handle
231 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
232 main.log.warn( sed )
233 main.log.warn( repr( sed ) )
234 handle.sendline( sed )
Jon Hallbd60ea02016-08-23 10:03:59 -0700235 handle.expect( metaFile )
236 output = handle.before
Jon Hall69b2b982016-05-11 12:04:59 -0700237 handle.expect( "\$" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700238 output += handle.before
239 main.log.debug( repr( output ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700240
241 main.step( "Creating ONOS package" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700242 packageResult = main.ONOSbench.buckBuild()
Jon Hall69b2b982016-05-11 12:04:59 -0700243 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
244 onpass="ONOS package successful",
245 onfail="ONOS package failed" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700246 if not packageResult:
247 main.cleanup()
248 main.exit()
Jon Hall69b2b982016-05-11 12:04:59 -0700249
250 main.step( "Installing ONOS package" )
251 onosInstallResult = main.TRUE
252 for i in range( main.ONOSbench.maxNodes ):
253 node = main.nodes[i]
254 options = "-f"
255 if i >= main.numCtrls:
256 options = "-nf" # Don't start more than the current scale
257 tmpResult = main.ONOSbench.onosInstall( options=options,
258 node=node.ip_address )
259 onosInstallResult = onosInstallResult and tmpResult
260 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
261 onpass="ONOS install successful",
262 onfail="ONOS install failed" )
263
264 # Cleanup custom onos-service file
265 main.ONOSbench.scp( main.ONOSbench,
266 path + ".backup",
267 path,
268 direction="to" )
269
You Wangf5de25b2017-01-06 15:13:01 -0800270 main.step( "Set up ONOS secure SSH" )
271 secureSshResult = main.TRUE
Jon Hall168c1862017-01-31 17:35:34 -0800272 for i in range( main.numCtrls ):
273 node = main.nodes[i]
You Wangf5de25b2017-01-06 15:13:01 -0800274 secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
275 utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
276 onpass="Test step PASS",
277 onfail="Test step FAIL" )
278
Jon Hall69b2b982016-05-11 12:04:59 -0700279 main.step( "Checking if ONOS is up yet" )
280 for i in range( 2 ):
281 onosIsupResult = main.TRUE
282 for i in range( main.numCtrls ):
283 node = main.nodes[i]
284 started = main.ONOSbench.isup( node.ip_address )
285 if not started:
286 main.log.error( node.name + " hasn't started" )
287 onosIsupResult = onosIsupResult and started
288 if onosIsupResult == main.TRUE:
289 break
290 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
291 onpass="ONOS startup successful",
292 onfail="ONOS startup failed" )
293
Jon Hall6509dbf2016-06-21 17:01:17 -0700294 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -0700295 cliResults = main.TRUE
296 threads = []
297 for i in range( main.numCtrls ):
298 t = main.Thread( target=main.CLIs[i].startOnosCli,
299 name="startOnosCli-" + str( i ),
300 args=[main.nodes[i].ip_address] )
301 threads.append( t )
302 t.start()
303
304 for t in threads:
305 t.join()
306 cliResults = cliResults and t.result
307 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
308 onpass="ONOS cli startup successful",
309 onfail="ONOS cli startup failed" )
310
311 # Create a list of active nodes for use when some nodes are stopped
312 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
313
314 if main.params[ 'tcpdump' ].lower() == "true":
315 main.step( "Start Packet Capture MN" )
316 main.Mininet2.startTcpdump(
317 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
318 + "-MN.pcap",
319 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
320 port=main.params[ 'MNtcpdump' ][ 'port' ] )
321
322 main.step( "Checking ONOS nodes" )
323 nodeResults = utilities.retry( main.HA.nodesCheck,
324 False,
325 args=[main.activeNodes],
326 attempts=5 )
327 utilities.assert_equals( expect=True, actual=nodeResults,
328 onpass="Nodes check successful",
329 onfail="Nodes check NOT successful" )
330
331 if not nodeResults:
332 for i in main.activeNodes:
333 cli = main.CLIs[i]
334 main.log.debug( "{} components not ACTIVE: \n{}".format(
335 cli.name,
336 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
337 main.log.error( "Failed to start ONOS, stopping test" )
338 main.cleanup()
339 main.exit()
340
341 main.step( "Activate apps defined in the params file" )
342 # get data from the params
343 apps = main.params.get( 'apps' )
344 if apps:
345 apps = apps.split(',')
346 main.log.warn( apps )
347 activateResult = True
348 for app in apps:
349 main.CLIs[ 0 ].app( app, "Activate" )
350 # TODO: check this worked
351 time.sleep( 10 ) # wait for apps to activate
352 for app in apps:
353 state = main.CLIs[ 0 ].appStatus( app )
354 if state == "ACTIVE":
355 activateResult = activateResult and True
356 else:
357 main.log.error( "{} is in {} state".format( app, state ) )
358 activateResult = False
359 utilities.assert_equals( expect=True,
360 actual=activateResult,
361 onpass="Successfully activated apps",
362 onfail="Failed to activate apps" )
363 else:
364 main.log.warn( "No apps were specified to be loaded after startup" )
365
366 main.step( "Set ONOS configurations" )
367 config = main.params.get( 'ONOS_Configuration' )
368 if config:
369 main.log.debug( config )
370 checkResult = main.TRUE
371 for component in config:
372 for setting in config[component]:
373 value = config[component][setting]
374 check = main.CLIs[ 0 ].setCfg( component, setting, value )
375 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
376 checkResult = check and checkResult
377 utilities.assert_equals( expect=main.TRUE,
378 actual=checkResult,
379 onpass="Successfully set config",
380 onfail="Failed to set config" )
381 else:
382 main.log.warn( "No configurations were specified to be changed after startup" )
383
384 main.step( "App Ids check" )
385 appCheck = main.TRUE
386 threads = []
387 for i in main.activeNodes:
388 t = main.Thread( target=main.CLIs[i].appToIDCheck,
389 name="appToIDCheck-" + str( i ),
390 args=[] )
391 threads.append( t )
392 t.start()
393
394 for t in threads:
395 t.join()
396 appCheck = appCheck and t.result
397 if appCheck != main.TRUE:
398 node = main.activeNodes[0]
399 main.log.warn( main.CLIs[node].apps() )
400 main.log.warn( main.CLIs[node].appIDs() )
401 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
402 onpass="App Ids seem to be correct",
403 onfail="Something is wrong with app Ids" )
404
405 def CASE2( self, main ):
406 """
407 Assign devices to controllers
408 """
409 import re
410 assert main.numCtrls, "main.numCtrls not defined"
411 assert main, "main not defined"
412 assert utilities.assert_equals, "utilities.assert_equals not defined"
413 assert main.CLIs, "main.CLIs not defined"
414 assert main.nodes, "main.nodes not defined"
415
416 main.case( "Assigning devices to controllers" )
417 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
418 "and check that an ONOS node becomes the " +\
419 "master of the device."
420 main.step( "Assign switches to controllers" )
421
422 ipList = []
423 for i in range( main.ONOSbench.maxNodes ):
424 ipList.append( main.nodes[ i ].ip_address )
425 swList = []
426 for i in range( 1, 29 ):
427 swList.append( "s" + str( i ) )
428 main.Mininet1.assignSwController( sw=swList, ip=ipList )
429
430 mastershipCheck = main.TRUE
431 for i in range( 1, 29 ):
432 response = main.Mininet1.getSwController( "s" + str( i ) )
433 try:
434 main.log.info( str( response ) )
435 except Exception:
436 main.log.info( repr( response ) )
437 for node in main.nodes:
438 if re.search( "tcp:" + node.ip_address, response ):
439 mastershipCheck = mastershipCheck and main.TRUE
440 else:
441 main.log.error( "Error, node " + node.ip_address + " is " +
442 "not in the list of controllers s" +
443 str( i ) + " is connecting to." )
444 mastershipCheck = main.FALSE
445 utilities.assert_equals(
446 expect=main.TRUE,
447 actual=mastershipCheck,
448 onpass="Switch mastership assigned correctly",
449 onfail="Switches not assigned correctly to controllers" )
450
451 def CASE21( self, main ):
452 """
453 Assign mastership to controllers
454 """
455 import time
456 assert main.numCtrls, "main.numCtrls not defined"
457 assert main, "main not defined"
458 assert utilities.assert_equals, "utilities.assert_equals not defined"
459 assert main.CLIs, "main.CLIs not defined"
460 assert main.nodes, "main.nodes not defined"
461
462 main.case( "Assigning Controller roles for switches" )
463 main.caseExplanation = "Check that ONOS is connected to each " +\
464 "device. Then manually assign" +\
465 " mastership to specific ONOS nodes using" +\
466 " 'device-role'"
467 main.step( "Assign mastership of switches to specific controllers" )
468 # Manually assign mastership to the controller we want
469 roleCall = main.TRUE
470
471 ipList = [ ]
472 deviceList = []
473 onosCli = main.CLIs[ main.activeNodes[0] ]
474 try:
475 # Assign mastership to specific controllers. This assignment was
476 # determined for a 7 node cluser, but will work with any sized
477 # cluster
478 for i in range( 1, 29 ): # switches 1 through 28
479 # set up correct variables:
480 if i == 1:
481 c = 0
482 ip = main.nodes[ c ].ip_address # ONOS1
483 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
484 elif i == 2:
485 c = 1 % main.numCtrls
486 ip = main.nodes[ c ].ip_address # ONOS2
487 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
488 elif i == 3:
489 c = 1 % main.numCtrls
490 ip = main.nodes[ c ].ip_address # ONOS2
491 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
492 elif i == 4:
493 c = 3 % main.numCtrls
494 ip = main.nodes[ c ].ip_address # ONOS4
495 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
496 elif i == 5:
497 c = 2 % main.numCtrls
498 ip = main.nodes[ c ].ip_address # ONOS3
499 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
500 elif i == 6:
501 c = 2 % main.numCtrls
502 ip = main.nodes[ c ].ip_address # ONOS3
503 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
504 elif i == 7:
505 c = 5 % main.numCtrls
506 ip = main.nodes[ c ].ip_address # ONOS6
507 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
508 elif i >= 8 and i <= 17:
509 c = 4 % main.numCtrls
510 ip = main.nodes[ c ].ip_address # ONOS5
511 dpid = '3' + str( i ).zfill( 3 )
512 deviceId = onosCli.getDevice( dpid ).get( 'id' )
513 elif i >= 18 and i <= 27:
514 c = 6 % main.numCtrls
515 ip = main.nodes[ c ].ip_address # ONOS7
516 dpid = '6' + str( i ).zfill( 3 )
517 deviceId = onosCli.getDevice( dpid ).get( 'id' )
518 elif i == 28:
519 c = 0
520 ip = main.nodes[ c ].ip_address # ONOS1
521 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
522 else:
523 main.log.error( "You didn't write an else statement for " +
524 "switch s" + str( i ) )
525 roleCall = main.FALSE
526 # Assign switch
527 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
528 # TODO: make this controller dynamic
529 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
530 ipList.append( ip )
531 deviceList.append( deviceId )
532 except ( AttributeError, AssertionError ):
533 main.log.exception( "Something is wrong with ONOS device view" )
534 main.log.info( onosCli.devices() )
535 utilities.assert_equals(
536 expect=main.TRUE,
537 actual=roleCall,
538 onpass="Re-assigned switch mastership to designated controller",
539 onfail="Something wrong with deviceRole calls" )
540
541 main.step( "Check mastership was correctly assigned" )
542 roleCheck = main.TRUE
543 # NOTE: This is due to the fact that device mastership change is not
544 # atomic and is actually a multi step process
545 time.sleep( 5 )
546 for i in range( len( ipList ) ):
547 ip = ipList[i]
548 deviceId = deviceList[i]
549 # Check assignment
550 master = onosCli.getRole( deviceId ).get( 'master' )
551 if ip in master:
552 roleCheck = roleCheck and main.TRUE
553 else:
554 roleCheck = roleCheck and main.FALSE
555 main.log.error( "Error, controller " + ip + " is not" +
556 " master " + "of device " +
557 str( deviceId ) + ". Master is " +
558 repr( master ) + "." )
559 utilities.assert_equals(
560 expect=main.TRUE,
561 actual=roleCheck,
562 onpass="Switches were successfully reassigned to designated " +
563 "controller",
564 onfail="Switches were not successfully reassigned" )
565
566 def CASE3( self, main ):
567 """
568 Assign intents
569 """
570 import time
571 import json
572 assert main.numCtrls, "main.numCtrls not defined"
573 assert main, "main not defined"
574 assert utilities.assert_equals, "utilities.assert_equals not defined"
575 assert main.CLIs, "main.CLIs not defined"
576 assert main.nodes, "main.nodes not defined"
577 try:
578 labels
579 except NameError:
580 main.log.error( "labels not defined, setting to []" )
581 labels = []
582 try:
583 data
584 except NameError:
585 main.log.error( "data not defined, setting to []" )
586 data = []
587 # NOTE: we must reinstall intents until we have a persistant intent
588 # datastore!
589 main.case( "Adding host Intents" )
590 main.caseExplanation = "Discover hosts by using pingall then " +\
591 "assign predetermined host-to-host intents." +\
592 " After installation, check that the intent" +\
593 " is distributed to all nodes and the state" +\
594 " is INSTALLED"
595
596 # install onos-app-fwd
597 main.step( "Install reactive forwarding app" )
598 onosCli = main.CLIs[ main.activeNodes[0] ]
599 installResults = onosCli.activateApp( "org.onosproject.fwd" )
600 utilities.assert_equals( expect=main.TRUE, actual=installResults,
601 onpass="Install fwd successful",
602 onfail="Install fwd failed" )
603
604 main.step( "Check app ids" )
605 appCheck = main.TRUE
606 threads = []
607 for i in main.activeNodes:
608 t = main.Thread( target=main.CLIs[i].appToIDCheck,
609 name="appToIDCheck-" + str( i ),
610 args=[] )
611 threads.append( t )
612 t.start()
613
614 for t in threads:
615 t.join()
616 appCheck = appCheck and t.result
617 if appCheck != main.TRUE:
618 main.log.warn( onosCli.apps() )
619 main.log.warn( onosCli.appIDs() )
620 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
621 onpass="App Ids seem to be correct",
622 onfail="Something is wrong with app Ids" )
623
624 main.step( "Discovering Hosts( Via pingall for now )" )
625 # FIXME: Once we have a host discovery mechanism, use that instead
626 # REACTIVE FWD test
627 pingResult = main.FALSE
628 passMsg = "Reactive Pingall test passed"
629 time1 = time.time()
630 pingResult = main.Mininet1.pingall()
631 time2 = time.time()
632 if not pingResult:
633 main.log.warn("First pingall failed. Trying again...")
634 pingResult = main.Mininet1.pingall()
635 passMsg += " on the second try"
636 utilities.assert_equals(
637 expect=main.TRUE,
638 actual=pingResult,
639 onpass= passMsg,
640 onfail="Reactive Pingall failed, " +
641 "one or more ping pairs failed" )
642 main.log.info( "Time for pingall: %2f seconds" %
643 ( time2 - time1 ) )
644 # timeout for fwd flows
645 time.sleep( 11 )
646 # uninstall onos-app-fwd
647 main.step( "Uninstall reactive forwarding app" )
648 node = main.activeNodes[0]
649 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
650 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
651 onpass="Uninstall fwd successful",
652 onfail="Uninstall fwd failed" )
653
654 main.step( "Check app ids" )
655 threads = []
656 appCheck2 = main.TRUE
657 for i in main.activeNodes:
658 t = main.Thread( target=main.CLIs[i].appToIDCheck,
659 name="appToIDCheck-" + str( i ),
660 args=[] )
661 threads.append( t )
662 t.start()
663
664 for t in threads:
665 t.join()
666 appCheck2 = appCheck2 and t.result
667 if appCheck2 != main.TRUE:
668 node = main.activeNodes[0]
669 main.log.warn( main.CLIs[node].apps() )
670 main.log.warn( main.CLIs[node].appIDs() )
671 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
672 onpass="App Ids seem to be correct",
673 onfail="Something is wrong with app Ids" )
674
675 main.step( "Add host intents via cli" )
676 intentIds = []
677 # TODO: move the host numbers to params
678 # Maybe look at all the paths we ping?
679 intentAddResult = True
680 hostResult = main.TRUE
681 for i in range( 8, 18 ):
682 main.log.info( "Adding host intent between h" + str( i ) +
683 " and h" + str( i + 10 ) )
684 host1 = "00:00:00:00:00:" + \
685 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
686 host2 = "00:00:00:00:00:" + \
687 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
688 # NOTE: getHost can return None
689 host1Dict = onosCli.getHost( host1 )
690 host2Dict = onosCli.getHost( host2 )
691 host1Id = None
692 host2Id = None
693 if host1Dict and host2Dict:
694 host1Id = host1Dict.get( 'id', None )
695 host2Id = host2Dict.get( 'id', None )
696 if host1Id and host2Id:
697 nodeNum = ( i % len( main.activeNodes ) )
698 node = main.activeNodes[nodeNum]
699 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
700 if tmpId:
701 main.log.info( "Added intent with id: " + tmpId )
702 intentIds.append( tmpId )
703 else:
704 main.log.error( "addHostIntent returned: " +
705 repr( tmpId ) )
706 else:
707 main.log.error( "Error, getHost() failed for h" + str( i ) +
708 " and/or h" + str( i + 10 ) )
709 node = main.activeNodes[0]
710 hosts = main.CLIs[node].hosts()
711 main.log.warn( "Hosts output: " )
712 try:
713 main.log.warn( json.dumps( json.loads( hosts ),
714 sort_keys=True,
715 indent=4,
716 separators=( ',', ': ' ) ) )
717 except ( ValueError, TypeError ):
718 main.log.warn( repr( hosts ) )
719 hostResult = main.FALSE
720 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
721 onpass="Found a host id for each host",
722 onfail="Error looking up host ids" )
723
724 intentStart = time.time()
725 onosIds = onosCli.getAllIntentsId()
726 main.log.info( "Submitted intents: " + str( intentIds ) )
727 main.log.info( "Intents in ONOS: " + str( onosIds ) )
728 for intent in intentIds:
729 if intent in onosIds:
730 pass # intent submitted is in onos
731 else:
732 intentAddResult = False
733 if intentAddResult:
734 intentStop = time.time()
735 else:
736 intentStop = None
737 # Print the intent states
738 intents = onosCli.intents()
739 intentStates = []
740 installedCheck = True
741 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
742 count = 0
743 try:
744 for intent in json.loads( intents ):
745 state = intent.get( 'state', None )
746 if "INSTALLED" not in state:
747 installedCheck = False
748 intentId = intent.get( 'id', None )
749 intentStates.append( ( intentId, state ) )
750 except ( ValueError, TypeError ):
751 main.log.exception( "Error parsing intents" )
752 # add submitted intents not in the store
753 tmplist = [ i for i, s in intentStates ]
754 missingIntents = False
755 for i in intentIds:
756 if i not in tmplist:
757 intentStates.append( ( i, " - " ) )
758 missingIntents = True
759 intentStates.sort()
760 for i, s in intentStates:
761 count += 1
762 main.log.info( "%-6s%-15s%-15s" %
763 ( str( count ), str( i ), str( s ) ) )
764 leaders = onosCli.leaders()
765 try:
766 missing = False
767 if leaders:
768 parsedLeaders = json.loads( leaders )
769 main.log.warn( json.dumps( parsedLeaders,
770 sort_keys=True,
771 indent=4,
772 separators=( ',', ': ' ) ) )
773 # check for all intent partitions
774 topics = []
775 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700776 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700777 main.log.debug( topics )
778 ONOStopics = [ j['topic'] for j in parsedLeaders ]
779 for topic in topics:
780 if topic not in ONOStopics:
781 main.log.error( "Error: " + topic +
782 " not in leaders" )
783 missing = True
784 else:
785 main.log.error( "leaders() returned None" )
786 except ( ValueError, TypeError ):
787 main.log.exception( "Error parsing leaders" )
788 main.log.error( repr( leaders ) )
789 # Check all nodes
790 if missing:
791 for i in main.activeNodes:
792 response = main.CLIs[i].leaders( jsonFormat=False)
793 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
794 str( response ) )
795
796 partitions = onosCli.partitions()
797 try:
798 if partitions :
799 parsedPartitions = json.loads( partitions )
800 main.log.warn( json.dumps( parsedPartitions,
801 sort_keys=True,
802 indent=4,
803 separators=( ',', ': ' ) ) )
804 # TODO check for a leader in all paritions
805 # TODO check for consistency among nodes
806 else:
807 main.log.error( "partitions() returned None" )
808 except ( ValueError, TypeError ):
809 main.log.exception( "Error parsing partitions" )
810 main.log.error( repr( partitions ) )
811 pendingMap = onosCli.pendingMap()
812 try:
813 if pendingMap :
814 parsedPending = json.loads( pendingMap )
815 main.log.warn( json.dumps( parsedPending,
816 sort_keys=True,
817 indent=4,
818 separators=( ',', ': ' ) ) )
819 # TODO check something here?
820 else:
821 main.log.error( "pendingMap() returned None" )
822 except ( ValueError, TypeError ):
823 main.log.exception( "Error parsing pending map" )
824 main.log.error( repr( pendingMap ) )
825
826 intentAddResult = bool( intentAddResult and not missingIntents and
827 installedCheck )
828 if not intentAddResult:
829 main.log.error( "Error in pushing host intents to ONOS" )
830
831 main.step( "Intent Anti-Entropy dispersion" )
832 for j in range(100):
833 correct = True
834 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
835 for i in main.activeNodes:
836 onosIds = []
837 ids = main.CLIs[i].getAllIntentsId()
838 onosIds.append( ids )
839 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
840 str( sorted( onosIds ) ) )
841 if sorted( ids ) != sorted( intentIds ):
842 main.log.warn( "Set of intent IDs doesn't match" )
843 correct = False
844 break
845 else:
846 intents = json.loads( main.CLIs[i].intents() )
847 for intent in intents:
848 if intent[ 'state' ] != "INSTALLED":
849 main.log.warn( "Intent " + intent[ 'id' ] +
850 " is " + intent[ 'state' ] )
851 correct = False
852 break
853 if correct:
854 break
855 else:
856 time.sleep(1)
857 if not intentStop:
858 intentStop = time.time()
859 global gossipTime
860 gossipTime = intentStop - intentStart
861 main.log.info( "It took about " + str( gossipTime ) +
862 " seconds for all intents to appear in each node" )
863 append = False
864 title = "Gossip Intents"
865 count = 1
866 while append is False:
867 curTitle = title + str( count )
868 if curTitle not in labels:
869 labels.append( curTitle )
870 data.append( str( gossipTime ) )
871 append = True
872 else:
873 count += 1
874 gossipPeriod = int( main.params['timers']['gossip'] )
875 maxGossipTime = gossipPeriod * len( main.activeNodes )
876 utilities.assert_greater_equals(
877 expect=maxGossipTime, actual=gossipTime,
878 onpass="ECM anti-entropy for intents worked within " +
879 "expected time",
880 onfail="Intent ECM anti-entropy took too long. " +
881 "Expected time:{}, Actual time:{}".format( maxGossipTime,
882 gossipTime ) )
883 if gossipTime <= maxGossipTime:
884 intentAddResult = True
885
886 if not intentAddResult or "key" in pendingMap:
887 import time
888 installedCheck = True
889 main.log.info( "Sleeping 60 seconds to see if intents are found" )
890 time.sleep( 60 )
891 onosIds = onosCli.getAllIntentsId()
892 main.log.info( "Submitted intents: " + str( intentIds ) )
893 main.log.info( "Intents in ONOS: " + str( onosIds ) )
894 # Print the intent states
895 intents = onosCli.intents()
896 intentStates = []
897 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
898 count = 0
899 try:
900 for intent in json.loads( intents ):
901 # Iter through intents of a node
902 state = intent.get( 'state', None )
903 if "INSTALLED" not in state:
904 installedCheck = False
905 intentId = intent.get( 'id', None )
906 intentStates.append( ( intentId, state ) )
907 except ( ValueError, TypeError ):
908 main.log.exception( "Error parsing intents" )
909 # add submitted intents not in the store
910 tmplist = [ i for i, s in intentStates ]
911 for i in intentIds:
912 if i not in tmplist:
913 intentStates.append( ( i, " - " ) )
914 intentStates.sort()
915 for i, s in intentStates:
916 count += 1
917 main.log.info( "%-6s%-15s%-15s" %
918 ( str( count ), str( i ), str( s ) ) )
919 leaders = onosCli.leaders()
920 try:
921 missing = False
922 if leaders:
923 parsedLeaders = json.loads( leaders )
924 main.log.warn( json.dumps( parsedLeaders,
925 sort_keys=True,
926 indent=4,
927 separators=( ',', ': ' ) ) )
928 # check for all intent partitions
929 # check for election
930 topics = []
931 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700932 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700933 # FIXME: this should only be after we start the app
934 topics.append( "org.onosproject.election" )
935 main.log.debug( topics )
936 ONOStopics = [ j['topic'] for j in parsedLeaders ]
937 for topic in topics:
938 if topic not in ONOStopics:
939 main.log.error( "Error: " + topic +
940 " not in leaders" )
941 missing = True
942 else:
943 main.log.error( "leaders() returned None" )
944 except ( ValueError, TypeError ):
945 main.log.exception( "Error parsing leaders" )
946 main.log.error( repr( leaders ) )
947 # Check all nodes
948 if missing:
949 for i in main.activeNodes:
950 node = main.CLIs[i]
951 response = node.leaders( jsonFormat=False)
952 main.log.warn( str( node.name ) + " leaders output: \n" +
953 str( response ) )
954
955 partitions = onosCli.partitions()
956 try:
957 if partitions :
958 parsedPartitions = json.loads( partitions )
959 main.log.warn( json.dumps( parsedPartitions,
960 sort_keys=True,
961 indent=4,
962 separators=( ',', ': ' ) ) )
963 # TODO check for a leader in all paritions
964 # TODO check for consistency among nodes
965 else:
966 main.log.error( "partitions() returned None" )
967 except ( ValueError, TypeError ):
968 main.log.exception( "Error parsing partitions" )
969 main.log.error( repr( partitions ) )
970 pendingMap = onosCli.pendingMap()
971 try:
972 if pendingMap :
973 parsedPending = json.loads( pendingMap )
974 main.log.warn( json.dumps( parsedPending,
975 sort_keys=True,
976 indent=4,
977 separators=( ',', ': ' ) ) )
978 # TODO check something here?
979 else:
980 main.log.error( "pendingMap() returned None" )
981 except ( ValueError, TypeError ):
982 main.log.exception( "Error parsing pending map" )
983 main.log.error( repr( pendingMap ) )
984
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents added in CASE3:
          1. Poll intent states (up to ~40 one-second retries) until every
             intent reports INSTALLED.
          2. Ping each intent's host pair: h<i> <-> h<i+10> for i in 8..17.
          3. Dump leadership topics, partitions and the intents pending map
             for debugging.
          4. If intents never all reached INSTALLED, wait 60 seconds, re-dump
             cluster state and flow rules, then retry the pings once.

        Reads (but does not modify) main.activeNodes / main.CLIs and drives
        Mininet via main.Mininet1.  Results are reported through
        utilities.assert_equals.
        """
        import json
        import time
        # Sanity-check that the test framework globals were initialized
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All single-node queries below go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED or ~40 attempts have passed
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs h<i> <-> h<i+10> correspond to the intents added in CASE3
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # At least one ping failed; dump the intent store for debugging
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # Expect one leadership topic per intent work partition
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders when the check PASSED
        # ( topicCheck is TRUE ); the similar blocks below dump on failure
        # ( "if missing:" ) -- confirm this condition is not inverted
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Intents never all reached INSTALLED: give the cluster another
        # minute, re-dump state, then retry the ping sweep once more
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1266
1267 def CASE5( self, main ):
1268 """
1269 Reading state of ONOS
1270 """
1271 import json
1272 import time
1273 assert main.numCtrls, "main.numCtrls not defined"
1274 assert main, "main not defined"
1275 assert utilities.assert_equals, "utilities.assert_equals not defined"
1276 assert main.CLIs, "main.CLIs not defined"
1277 assert main.nodes, "main.nodes not defined"
1278
1279 main.case( "Setting up and gathering data for current state" )
1280 # The general idea for this test case is to pull the state of
1281 # ( intents,flows, topology,... ) from each ONOS node
1282 # We can then compare them with each other and also with past states
1283
1284 main.step( "Check that each switch has a master" )
1285 global mastershipState
1286 mastershipState = '[]'
1287
1288 # Assert that each device has a master
1289 rolesNotNull = main.TRUE
1290 threads = []
1291 for i in main.activeNodes:
1292 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1293 name="rolesNotNull-" + str( i ),
1294 args=[] )
1295 threads.append( t )
1296 t.start()
1297
1298 for t in threads:
1299 t.join()
1300 rolesNotNull = rolesNotNull and t.result
1301 utilities.assert_equals(
1302 expect=main.TRUE,
1303 actual=rolesNotNull,
1304 onpass="Each device has a master",
1305 onfail="Some devices don't have a master assigned" )
1306
1307 main.step( "Get the Mastership of each switch from each controller" )
1308 ONOSMastership = []
1309 consistentMastership = True
1310 rolesResults = True
1311 threads = []
1312 for i in main.activeNodes:
1313 t = main.Thread( target=main.CLIs[i].roles,
1314 name="roles-" + str( i ),
1315 args=[] )
1316 threads.append( t )
1317 t.start()
1318
1319 for t in threads:
1320 t.join()
1321 ONOSMastership.append( t.result )
1322
1323 for i in range( len( ONOSMastership ) ):
1324 node = str( main.activeNodes[i] + 1 )
1325 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1326 main.log.error( "Error in getting ONOS" + node + " roles" )
1327 main.log.warn( "ONOS" + node + " mastership response: " +
1328 repr( ONOSMastership[i] ) )
1329 rolesResults = False
1330 utilities.assert_equals(
1331 expect=True,
1332 actual=rolesResults,
1333 onpass="No error in reading roles output",
1334 onfail="Error in reading roles from ONOS" )
1335
1336 main.step( "Check for consistency in roles from each controller" )
1337 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1338 main.log.info(
1339 "Switch roles are consistent across all ONOS nodes" )
1340 else:
1341 consistentMastership = False
1342 utilities.assert_equals(
1343 expect=True,
1344 actual=consistentMastership,
1345 onpass="Switch roles are consistent across all ONOS nodes",
1346 onfail="ONOS nodes have different views of switch roles" )
1347
1348 if rolesResults and not consistentMastership:
1349 for i in range( len( main.activeNodes ) ):
1350 node = str( main.activeNodes[i] + 1 )
1351 try:
1352 main.log.warn(
1353 "ONOS" + node + " roles: ",
1354 json.dumps(
1355 json.loads( ONOSMastership[ i ] ),
1356 sort_keys=True,
1357 indent=4,
1358 separators=( ',', ': ' ) ) )
1359 except ( ValueError, TypeError ):
1360 main.log.warn( repr( ONOSMastership[ i ] ) )
1361 elif rolesResults and consistentMastership:
1362 mastershipState = ONOSMastership[ 0 ]
1363
1364 main.step( "Get the intents from each controller" )
1365 global intentState
1366 intentState = []
1367 ONOSIntents = []
1368 consistentIntents = True # Are Intents consistent across nodes?
1369 intentsResults = True # Could we read Intents from ONOS?
1370 threads = []
1371 for i in main.activeNodes:
1372 t = main.Thread( target=main.CLIs[i].intents,
1373 name="intents-" + str( i ),
1374 args=[],
1375 kwargs={ 'jsonFormat': True } )
1376 threads.append( t )
1377 t.start()
1378
1379 for t in threads:
1380 t.join()
1381 ONOSIntents.append( t.result )
1382
1383 for i in range( len( ONOSIntents ) ):
1384 node = str( main.activeNodes[i] + 1 )
1385 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1386 main.log.error( "Error in getting ONOS" + node + " intents" )
1387 main.log.warn( "ONOS" + node + " intents response: " +
1388 repr( ONOSIntents[ i ] ) )
1389 intentsResults = False
1390 utilities.assert_equals(
1391 expect=True,
1392 actual=intentsResults,
1393 onpass="No error in reading intents output",
1394 onfail="Error in reading intents from ONOS" )
1395
1396 main.step( "Check for consistency in Intents from each controller" )
1397 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1398 main.log.info( "Intents are consistent across all ONOS " +
1399 "nodes" )
1400 else:
1401 consistentIntents = False
1402 main.log.error( "Intents not consistent" )
1403 utilities.assert_equals(
1404 expect=True,
1405 actual=consistentIntents,
1406 onpass="Intents are consistent across all ONOS nodes",
1407 onfail="ONOS nodes have different views of intents" )
1408
1409 if intentsResults:
1410 # Try to make it easy to figure out what is happening
1411 #
1412 # Intent ONOS1 ONOS2 ...
1413 # 0x01 INSTALLED INSTALLING
1414 # ... ... ...
1415 # ... ... ...
1416 title = " Id"
1417 for n in main.activeNodes:
1418 title += " " * 10 + "ONOS" + str( n + 1 )
1419 main.log.warn( title )
1420 # get all intent keys in the cluster
1421 keys = []
1422 try:
1423 # Get the set of all intent keys
1424 for nodeStr in ONOSIntents:
1425 node = json.loads( nodeStr )
1426 for intent in node:
1427 keys.append( intent.get( 'id' ) )
1428 keys = set( keys )
1429 # For each intent key, print the state on each node
1430 for key in keys:
1431 row = "%-13s" % key
1432 for nodeStr in ONOSIntents:
1433 node = json.loads( nodeStr )
1434 for intent in node:
1435 if intent.get( 'id', "Error" ) == key:
1436 row += "%-15s" % intent.get( 'state' )
1437 main.log.warn( row )
1438 # End of intent state table
1439 except ValueError as e:
1440 main.log.exception( e )
1441 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1442
1443 if intentsResults and not consistentIntents:
1444 # print the json objects
1445 n = str( main.activeNodes[-1] + 1 )
1446 main.log.debug( "ONOS" + n + " intents: " )
1447 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1448 sort_keys=True,
1449 indent=4,
1450 separators=( ',', ': ' ) ) )
1451 for i in range( len( ONOSIntents ) ):
1452 node = str( main.activeNodes[i] + 1 )
1453 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1454 main.log.debug( "ONOS" + node + " intents: " )
1455 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1456 sort_keys=True,
1457 indent=4,
1458 separators=( ',', ': ' ) ) )
1459 else:
1460 main.log.debug( "ONOS" + node + " intents match ONOS" +
1461 n + " intents" )
1462 elif intentsResults and consistentIntents:
1463 intentState = ONOSIntents[ 0 ]
1464
1465 main.step( "Get the flows from each controller" )
1466 global flowState
1467 flowState = []
1468 ONOSFlows = []
1469 ONOSFlowsJson = []
1470 flowCheck = main.FALSE
1471 consistentFlows = True
1472 flowsResults = True
1473 threads = []
1474 for i in main.activeNodes:
1475 t = main.Thread( target=main.CLIs[i].flows,
1476 name="flows-" + str( i ),
1477 args=[],
1478 kwargs={ 'jsonFormat': True } )
1479 threads.append( t )
1480 t.start()
1481
1482 # NOTE: Flows command can take some time to run
1483 time.sleep(30)
1484 for t in threads:
1485 t.join()
1486 result = t.result
1487 ONOSFlows.append( result )
1488
1489 for i in range( len( ONOSFlows ) ):
1490 num = str( main.activeNodes[i] + 1 )
1491 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1492 main.log.error( "Error in getting ONOS" + num + " flows" )
1493 main.log.warn( "ONOS" + num + " flows response: " +
1494 repr( ONOSFlows[ i ] ) )
1495 flowsResults = False
1496 ONOSFlowsJson.append( None )
1497 else:
1498 try:
1499 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1500 except ( ValueError, TypeError ):
1501 # FIXME: change this to log.error?
1502 main.log.exception( "Error in parsing ONOS" + num +
1503 " response as json." )
1504 main.log.error( repr( ONOSFlows[ i ] ) )
1505 ONOSFlowsJson.append( None )
1506 flowsResults = False
1507 utilities.assert_equals(
1508 expect=True,
1509 actual=flowsResults,
1510 onpass="No error in reading flows output",
1511 onfail="Error in reading flows from ONOS" )
1512
1513 main.step( "Check for consistency in Flows from each controller" )
1514 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1515 if all( tmp ):
1516 main.log.info( "Flow count is consistent across all ONOS nodes" )
1517 else:
1518 consistentFlows = False
1519 utilities.assert_equals(
1520 expect=True,
1521 actual=consistentFlows,
1522 onpass="The flow count is consistent across all ONOS nodes",
1523 onfail="ONOS nodes have different flow counts" )
1524
1525 if flowsResults and not consistentFlows:
1526 for i in range( len( ONOSFlows ) ):
1527 node = str( main.activeNodes[i] + 1 )
1528 try:
1529 main.log.warn(
1530 "ONOS" + node + " flows: " +
1531 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1532 indent=4, separators=( ',', ': ' ) ) )
1533 except ( ValueError, TypeError ):
1534 main.log.warn( "ONOS" + node + " flows: " +
1535 repr( ONOSFlows[ i ] ) )
1536 elif flowsResults and consistentFlows:
1537 flowCheck = main.TRUE
1538 flowState = ONOSFlows[ 0 ]
1539
1540 main.step( "Get the OF Table entries" )
1541 global flows
1542 flows = []
1543 for i in range( 1, 29 ):
1544 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1545 if flowCheck == main.FALSE:
1546 for table in flows:
1547 main.log.warn( table )
1548 # TODO: Compare switch flow tables with ONOS flow tables
1549
1550 main.step( "Start continuous pings" )
1551 main.Mininet2.pingLong(
1552 src=main.params[ 'PING' ][ 'source1' ],
1553 target=main.params[ 'PING' ][ 'target1' ],
1554 pingTime=500 )
1555 main.Mininet2.pingLong(
1556 src=main.params[ 'PING' ][ 'source2' ],
1557 target=main.params[ 'PING' ][ 'target2' ],
1558 pingTime=500 )
1559 main.Mininet2.pingLong(
1560 src=main.params[ 'PING' ][ 'source3' ],
1561 target=main.params[ 'PING' ][ 'target3' ],
1562 pingTime=500 )
1563 main.Mininet2.pingLong(
1564 src=main.params[ 'PING' ][ 'source4' ],
1565 target=main.params[ 'PING' ][ 'target4' ],
1566 pingTime=500 )
1567 main.Mininet2.pingLong(
1568 src=main.params[ 'PING' ][ 'source5' ],
1569 target=main.params[ 'PING' ][ 'target5' ],
1570 pingTime=500 )
1571 main.Mininet2.pingLong(
1572 src=main.params[ 'PING' ][ 'source6' ],
1573 target=main.params[ 'PING' ][ 'target6' ],
1574 pingTime=500 )
1575 main.Mininet2.pingLong(
1576 src=main.params[ 'PING' ][ 'source7' ],
1577 target=main.params[ 'PING' ][ 'target7' ],
1578 pingTime=500 )
1579 main.Mininet2.pingLong(
1580 src=main.params[ 'PING' ][ 'source8' ],
1581 target=main.params[ 'PING' ][ 'target8' ],
1582 pingTime=500 )
1583 main.Mininet2.pingLong(
1584 src=main.params[ 'PING' ][ 'source9' ],
1585 target=main.params[ 'PING' ][ 'target9' ],
1586 pingTime=500 )
1587 main.Mininet2.pingLong(
1588 src=main.params[ 'PING' ][ 'source10' ],
1589 target=main.params[ 'PING' ][ 'target10' ],
1590 pingTime=500 )
1591
1592 main.step( "Collecting topology information from ONOS" )
1593 devices = []
1594 threads = []
1595 for i in main.activeNodes:
1596 t = main.Thread( target=main.CLIs[i].devices,
1597 name="devices-" + str( i ),
1598 args=[ ] )
1599 threads.append( t )
1600 t.start()
1601
1602 for t in threads:
1603 t.join()
1604 devices.append( t.result )
1605 hosts = []
1606 threads = []
1607 for i in main.activeNodes:
1608 t = main.Thread( target=main.CLIs[i].hosts,
1609 name="hosts-" + str( i ),
1610 args=[ ] )
1611 threads.append( t )
1612 t.start()
1613
1614 for t in threads:
1615 t.join()
1616 try:
1617 hosts.append( json.loads( t.result ) )
1618 except ( ValueError, TypeError ):
1619 # FIXME: better handling of this, print which node
1620 # Maybe use thread name?
1621 main.log.exception( "Error parsing json output of hosts" )
1622 main.log.warn( repr( t.result ) )
1623 hosts.append( None )
1624
1625 ports = []
1626 threads = []
1627 for i in main.activeNodes:
1628 t = main.Thread( target=main.CLIs[i].ports,
1629 name="ports-" + str( i ),
1630 args=[ ] )
1631 threads.append( t )
1632 t.start()
1633
1634 for t in threads:
1635 t.join()
1636 ports.append( t.result )
1637 links = []
1638 threads = []
1639 for i in main.activeNodes:
1640 t = main.Thread( target=main.CLIs[i].links,
1641 name="links-" + str( i ),
1642 args=[ ] )
1643 threads.append( t )
1644 t.start()
1645
1646 for t in threads:
1647 t.join()
1648 links.append( t.result )
1649 clusters = []
1650 threads = []
1651 for i in main.activeNodes:
1652 t = main.Thread( target=main.CLIs[i].clusters,
1653 name="clusters-" + str( i ),
1654 args=[ ] )
1655 threads.append( t )
1656 t.start()
1657
1658 for t in threads:
1659 t.join()
1660 clusters.append( t.result )
1661 # Compare json objects for hosts and dataplane clusters
1662
1663 # hosts
1664 main.step( "Host view is consistent across ONOS nodes" )
1665 consistentHostsResult = main.TRUE
1666 for controller in range( len( hosts ) ):
1667 controllerStr = str( main.activeNodes[controller] + 1 )
1668 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1669 if hosts[ controller ] == hosts[ 0 ]:
1670 continue
1671 else: # hosts not consistent
1672 main.log.error( "hosts from ONOS" +
1673 controllerStr +
1674 " is inconsistent with ONOS1" )
1675 main.log.warn( repr( hosts[ controller ] ) )
1676 consistentHostsResult = main.FALSE
1677
1678 else:
1679 main.log.error( "Error in getting ONOS hosts from ONOS" +
1680 controllerStr )
1681 consistentHostsResult = main.FALSE
1682 main.log.warn( "ONOS" + controllerStr +
1683 " hosts response: " +
1684 repr( hosts[ controller ] ) )
1685 utilities.assert_equals(
1686 expect=main.TRUE,
1687 actual=consistentHostsResult,
1688 onpass="Hosts view is consistent across all ONOS nodes",
1689 onfail="ONOS nodes have different views of hosts" )
1690
1691 main.step( "Each host has an IP address" )
1692 ipResult = main.TRUE
1693 for controller in range( 0, len( hosts ) ):
1694 controllerStr = str( main.activeNodes[controller] + 1 )
1695 if hosts[ controller ]:
1696 for host in hosts[ controller ]:
1697 if not host.get( 'ipAddresses', [ ] ):
1698 main.log.error( "Error with host ips on controller" +
1699 controllerStr + ": " + str( host ) )
1700 ipResult = main.FALSE
1701 utilities.assert_equals(
1702 expect=main.TRUE,
1703 actual=ipResult,
1704 onpass="The ips of the hosts aren't empty",
1705 onfail="The ip of at least one host is missing" )
1706
1707 # Strongly connected clusters of devices
1708 main.step( "Cluster view is consistent across ONOS nodes" )
1709 consistentClustersResult = main.TRUE
1710 for controller in range( len( clusters ) ):
1711 controllerStr = str( main.activeNodes[controller] + 1 )
1712 if "Error" not in clusters[ controller ]:
1713 if clusters[ controller ] == clusters[ 0 ]:
1714 continue
1715 else: # clusters not consistent
1716 main.log.error( "clusters from ONOS" + controllerStr +
1717 " is inconsistent with ONOS1" )
1718 consistentClustersResult = main.FALSE
1719
1720 else:
1721 main.log.error( "Error in getting dataplane clusters " +
1722 "from ONOS" + controllerStr )
1723 consistentClustersResult = main.FALSE
1724 main.log.warn( "ONOS" + controllerStr +
1725 " clusters response: " +
1726 repr( clusters[ controller ] ) )
1727 utilities.assert_equals(
1728 expect=main.TRUE,
1729 actual=consistentClustersResult,
1730 onpass="Clusters view is consistent across all ONOS nodes",
1731 onfail="ONOS nodes have different views of clusters" )
1732 if not consistentClustersResult:
1733 main.log.debug( clusters )
1734
1735 # there should always only be one cluster
1736 main.step( "Cluster view correct across ONOS nodes" )
1737 try:
1738 numClusters = len( json.loads( clusters[ 0 ] ) )
1739 except ( ValueError, TypeError ):
1740 main.log.exception( "Error parsing clusters[0]: " +
1741 repr( clusters[ 0 ] ) )
1742 numClusters = "ERROR"
1743 utilities.assert_equals(
1744 expect=1,
1745 actual=numClusters,
1746 onpass="ONOS shows 1 SCC",
1747 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1748
1749 main.step( "Comparing ONOS topology to MN" )
1750 devicesResults = main.TRUE
1751 linksResults = main.TRUE
1752 hostsResults = main.TRUE
1753 mnSwitches = main.Mininet1.getSwitches()
1754 mnLinks = main.Mininet1.getLinks()
1755 mnHosts = main.Mininet1.getHosts()
1756 for controller in main.activeNodes:
1757 controllerStr = str( main.activeNodes[controller] + 1 )
1758 if devices[ controller ] and ports[ controller ] and\
1759 "Error" not in devices[ controller ] and\
1760 "Error" not in ports[ controller ]:
1761 currentDevicesResult = main.Mininet1.compareSwitches(
1762 mnSwitches,
1763 json.loads( devices[ controller ] ),
1764 json.loads( ports[ controller ] ) )
1765 else:
1766 currentDevicesResult = main.FALSE
1767 utilities.assert_equals( expect=main.TRUE,
1768 actual=currentDevicesResult,
1769 onpass="ONOS" + controllerStr +
1770 " Switches view is correct",
1771 onfail="ONOS" + controllerStr +
1772 " Switches view is incorrect" )
1773 if links[ controller ] and "Error" not in links[ controller ]:
1774 currentLinksResult = main.Mininet1.compareLinks(
1775 mnSwitches, mnLinks,
1776 json.loads( links[ controller ] ) )
1777 else:
1778 currentLinksResult = main.FALSE
1779 utilities.assert_equals( expect=main.TRUE,
1780 actual=currentLinksResult,
1781 onpass="ONOS" + controllerStr +
1782 " links view is correct",
1783 onfail="ONOS" + controllerStr +
1784 " links view is incorrect" )
1785
1786 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1787 currentHostsResult = main.Mininet1.compareHosts(
1788 mnHosts,
1789 hosts[ controller ] )
1790 else:
1791 currentHostsResult = main.FALSE
1792 utilities.assert_equals( expect=main.TRUE,
1793 actual=currentHostsResult,
1794 onpass="ONOS" + controllerStr +
1795 " hosts exist in Mininet",
1796 onfail="ONOS" + controllerStr +
1797 " hosts don't match Mininet" )
1798
1799 devicesResults = devicesResults and currentDevicesResult
1800 linksResults = linksResults and currentLinksResult
1801 hostsResults = hostsResults and currentHostsResult
1802
1803 main.step( "Device information is correct" )
1804 utilities.assert_equals(
1805 expect=main.TRUE,
1806 actual=devicesResults,
1807 onpass="Device information is correct",
1808 onfail="Device information is incorrect" )
1809
1810 main.step( "Links are correct" )
1811 utilities.assert_equals(
1812 expect=main.TRUE,
1813 actual=linksResults,
1814 onpass="Link are correct",
1815 onfail="Links are incorrect" )
1816
1817 main.step( "Hosts are correct" )
1818 utilities.assert_equals(
1819 expect=main.TRUE,
1820 actual=hostsResults,
1821 onpass="Hosts are correct",
1822 onfail="Hosts are incorrect" )
1823
1824 def CASE6( self, main ):
1825 """
1826 The Scaling case.
1827 """
1828 import time
1829 import re
1830 assert main.numCtrls, "main.numCtrls not defined"
1831 assert main, "main not defined"
1832 assert utilities.assert_equals, "utilities.assert_equals not defined"
1833 assert main.CLIs, "main.CLIs not defined"
1834 assert main.nodes, "main.nodes not defined"
1835 try:
1836 labels
1837 except NameError:
1838 main.log.error( "labels not defined, setting to []" )
1839 global labels
1840 labels = []
1841 try:
1842 data
1843 except NameError:
1844 main.log.error( "data not defined, setting to []" )
1845 global data
1846 data = []
1847
1848 main.case( "Swap some of the ONOS nodes" )
1849
1850 main.step( "Checking ONOS Logs for errors" )
1851 for i in main.activeNodes:
1852 node = main.nodes[i]
1853 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1854 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1855
1856 main.step( "Generate new metadata file" )
Jon Hallbd60ea02016-08-23 10:03:59 -07001857 old = [ main.activeNodes[1], main.activeNodes[-2] ]
Jon Hall69b2b982016-05-11 12:04:59 -07001858 new = range( main.ONOSbench.maxNodes )[-2:]
1859 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1860 handle = main.ONOSbench.handle
1861 for x, y in zip( old, new ):
1862 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1863 handle.expect( "\$" ) # from the variable
1864 ret = handle.before
1865 handle.expect( "\$" ) # From the prompt
1866 ret += handle.before
1867 main.log.debug( ret )
1868 main.activeNodes.remove( x )
1869 main.activeNodes.append( y )
1870
1871 genResult = main.Server.generateFile( main.numCtrls )
1872 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1873 onpass="New cluster metadata file generated",
1874 onfail="Failled to generate new metadata file" )
1875 time.sleep( 5 ) # Give time for nodes to read new file
1876
1877 main.step( "Start new nodes" ) # OR stop old nodes?
1878 started = main.TRUE
1879 for i in new:
1880 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1881 utilities.assert_equals( expect=main.TRUE, actual=started,
1882 onpass="ONOS started",
1883 onfail="ONOS start NOT successful" )
1884
1885 main.step( "Checking if ONOS is up yet" )
1886 for i in range( 2 ):
1887 onosIsupResult = main.TRUE
1888 for i in main.activeNodes:
1889 node = main.nodes[i]
Jon Hall168c1862017-01-31 17:35:34 -08001890 main.ONOSbench.onosSecureSSH( node=node.ip_address )
Jon Hall69b2b982016-05-11 12:04:59 -07001891 started = main.ONOSbench.isup( node.ip_address )
1892 if not started:
1893 main.log.error( node.name + " didn't start!" )
1894 onosIsupResult = onosIsupResult and started
1895 if onosIsupResult == main.TRUE:
1896 break
1897 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1898 onpass="ONOS started",
1899 onfail="ONOS start NOT successful" )
1900
Jon Hall6509dbf2016-06-21 17:01:17 -07001901 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -07001902 cliResults = main.TRUE
1903 threads = []
1904 for i in main.activeNodes:
1905 t = main.Thread( target=main.CLIs[i].startOnosCli,
1906 name="startOnosCli-" + str( i ),
1907 args=[main.nodes[i].ip_address] )
1908 threads.append( t )
1909 t.start()
1910
1911 for t in threads:
1912 t.join()
1913 cliResults = cliResults and t.result
1914 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1915 onpass="ONOS cli started",
1916 onfail="ONOS clis did not start" )
1917
1918 main.step( "Checking ONOS nodes" )
1919 nodeResults = utilities.retry( main.HA.nodesCheck,
1920 False,
1921 args=[main.activeNodes],
1922 attempts=5 )
1923 utilities.assert_equals( expect=True, actual=nodeResults,
1924 onpass="Nodes check successful",
1925 onfail="Nodes check NOT successful" )
1926
1927 for i in range( 10 ):
1928 ready = True
1929 for i in main.activeNodes:
1930 cli = main.CLIs[i]
1931 output = cli.summary()
1932 if not output:
1933 ready = False
1934 if ready:
1935 break
1936 time.sleep( 30 )
1937 utilities.assert_equals( expect=True, actual=ready,
1938 onpass="ONOS summary command succeded",
1939 onfail="ONOS summary command failed" )
1940 if not ready:
1941 main.cleanup()
1942 main.exit()
1943
1944 # Rerun for election on new nodes
1945 runResults = main.TRUE
1946 for i in main.activeNodes:
1947 cli = main.CLIs[i]
1948 run = cli.electionTestRun()
1949 if run != main.TRUE:
1950 main.log.error( "Error running for election on " + cli.name )
1951 runResults = runResults and run
1952 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1953 onpass="Reran for election",
1954 onfail="Failed to rerun for election" )
1955
1956 for node in main.activeNodes:
1957 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1958 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1959 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1960 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1961 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1962
1963 main.step( "Reapplying cell variable to environment" )
1964 cellName = main.params[ 'ENV' ][ 'cellName' ]
1965 cellResult = main.ONOSbench.setCell( cellName )
1966 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1967 onpass="Set cell successfull",
1968 onfail="Failled to set cell" )
1969
    def CASE7( self, main ):
        """
        Check state after ONOS scaling

        Verifies that device mastership, intents, switch flow tables and
        leadership election are intact after the node swap done in CASE6.
        NOTE(review): reads the globals intentState and flows which appear
        to be saved by earlier cases (CASE5) - confirm against those cases.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # An empty or error response from any node fails the whole read
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's view is compared against the first node's view
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # When the views disagree, dump each node's view for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # Print one row per intent id with each node's state for it
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node tally of intent states, e.g. {'INSTALLED': 10}
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same count but different text; compare intent-by-intent
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before scaling",
                onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # flows is expected to hold pre-scaling flow tables saved by an
        # earlier case - TODO confirm which case saves it
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # All nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2278
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices, hosts, ports, links and clusters from
        every active ONOS node and compares them with the Mininet topology
        until the views match or the retry budget (60s / 3 tries) runs out,
        then runs consistency and host-attachment checks on the final pull.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Pull the device list from each node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Pull and parse the host list from each node
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Pull the port list from each node
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Pull the link list from each node
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Pull the SCC (cluster) list from each node
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to compare
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each node's view against the Mininet topology
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )
            for x in links:
                main.log.warn( "{}: {}".format( len( x ), x ) )


        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Overall result; convergence must have taken at most 2 tries
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            main.cleanup()
            main.exit()
2704
Jon Hall69b2b982016-05-11 12:04:59 -07002705 def CASE9( self, main ):
2706 """
2707 Link s3-s28 down
2708 """
2709 import time
2710 assert main.numCtrls, "main.numCtrls not defined"
2711 assert main, "main not defined"
2712 assert utilities.assert_equals, "utilities.assert_equals not defined"
2713 assert main.CLIs, "main.CLIs not defined"
2714 assert main.nodes, "main.nodes not defined"
2715 # NOTE: You should probably run a topology check after this
2716
2717 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2718
2719 description = "Turn off a link to ensure that Link Discovery " +\
2720 "is working properly"
2721 main.case( description )
2722
2723 main.step( "Kill Link between s3 and s28" )
2724 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2725 main.log.info( "Waiting " + str( linkSleep ) +
2726 " seconds for link down to be discovered" )
2727 time.sleep( linkSleep )
2728 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2729 onpass="Link down successful",
2730 onfail="Failed to bring link down" )
2731 # TODO do some sort of check here
2732
2733 def CASE10( self, main ):
2734 """
2735 Link s3-s28 up
2736 """
2737 import time
2738 assert main.numCtrls, "main.numCtrls not defined"
2739 assert main, "main not defined"
2740 assert utilities.assert_equals, "utilities.assert_equals not defined"
2741 assert main.CLIs, "main.CLIs not defined"
2742 assert main.nodes, "main.nodes not defined"
2743 # NOTE: You should probably run a topology check after this
2744
2745 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2746
2747 description = "Restore a link to ensure that Link Discovery is " + \
2748 "working properly"
2749 main.case( description )
2750
2751 main.step( "Bring link between s3 and s28 back up" )
2752 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2753 main.log.info( "Waiting " + str( linkSleep ) +
2754 " seconds for link up to be discovered" )
2755 time.sleep( linkSleep )
2756 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2757 onpass="Link up successful",
2758 onfail="Failed to bring link up" )
2759 # TODO do some sort of check here
2760
2761 def CASE11( self, main ):
2762 """
2763 Switch Down
2764 """
2765 # NOTE: You should probably run a topology check after this
2766 import time
2767 assert main.numCtrls, "main.numCtrls not defined"
2768 assert main, "main not defined"
2769 assert utilities.assert_equals, "utilities.assert_equals not defined"
2770 assert main.CLIs, "main.CLIs not defined"
2771 assert main.nodes, "main.nodes not defined"
2772
2773 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2774
2775 description = "Killing a switch to ensure it is discovered correctly"
2776 onosCli = main.CLIs[ main.activeNodes[0] ]
2777 main.case( description )
2778 switch = main.params[ 'kill' ][ 'switch' ]
2779 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2780
2781 # TODO: Make this switch parameterizable
2782 main.step( "Kill " + switch )
2783 main.log.info( "Deleting " + switch )
2784 main.Mininet1.delSwitch( switch )
2785 main.log.info( "Waiting " + str( switchSleep ) +
2786 " seconds for switch down to be discovered" )
2787 time.sleep( switchSleep )
2788 device = onosCli.getDevice( dpid=switchDPID )
2789 # Peek at the deleted switch
2790 main.log.warn( str( device ) )
2791 result = main.FALSE
2792 if device and device[ 'available' ] is False:
2793 result = main.TRUE
2794 utilities.assert_equals( expect=main.TRUE, actual=result,
2795 onpass="Kill switch successful",
2796 onfail="Failed to kill switch?" )
2797
2798 def CASE12( self, main ):
2799 """
2800 Switch Up
2801 """
2802 # NOTE: You should probably run a topology check after this
2803 import time
2804 assert main.numCtrls, "main.numCtrls not defined"
2805 assert main, "main not defined"
2806 assert utilities.assert_equals, "utilities.assert_equals not defined"
2807 assert main.CLIs, "main.CLIs not defined"
2808 assert main.nodes, "main.nodes not defined"
2809
2810 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2811 switch = main.params[ 'kill' ][ 'switch' ]
2812 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2813 links = main.params[ 'kill' ][ 'links' ].split()
2814 onosCli = main.CLIs[ main.activeNodes[0] ]
2815 description = "Adding a switch to ensure it is discovered correctly"
2816 main.case( description )
2817
2818 main.step( "Add back " + switch )
2819 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2820 for peer in links:
2821 main.Mininet1.addLink( switch, peer )
2822 ipList = [ node.ip_address for node in main.nodes ]
2823 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2824 main.log.info( "Waiting " + str( switchSleep ) +
2825 " seconds for switch up to be discovered" )
2826 time.sleep( switchSleep )
2827 device = onosCli.getDevice( dpid=switchDPID )
2828 # Peek at the deleted switch
2829 main.log.warn( str( device ) )
2830 result = main.FALSE
2831 if device and device[ 'available' ]:
2832 result = main.TRUE
2833 utilities.assert_equals( expect=main.TRUE, actual=result,
2834 onpass="add switch successful",
2835 onfail="Failed to add switch?" )
2836
2837 def CASE13( self, main ):
2838 """
2839 Clean up
2840 """
2841 assert main.numCtrls, "main.numCtrls not defined"
2842 assert main, "main not defined"
2843 assert utilities.assert_equals, "utilities.assert_equals not defined"
2844 assert main.CLIs, "main.CLIs not defined"
2845 assert main.nodes, "main.nodes not defined"
2846
2847 main.case( "Test Cleanup" )
2848 main.step( "Killing tcpdumps" )
2849 main.Mininet2.stopTcpdump()
2850
2851 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2852 main.step( "Copying MN pcap and ONOS log files to test station" )
2853 # NOTE: MN Pcap file is being saved to logdir.
2854 # We scp this file as MN and TestON aren't necessarily the same vm
2855
2856 # FIXME: To be replaced with a Jenkin's post script
2857 # TODO: Load these from params
2858 # NOTE: must end in /
2859 logFolder = "/opt/onos/log/"
2860 logFiles = [ "karaf.log", "karaf.log.1" ]
2861 # NOTE: must end in /
2862 for f in logFiles:
2863 for node in main.nodes:
2864 dstName = main.logdir + "/" + node.name + "-" + f
2865 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2866 logFolder + f, dstName )
2867 # std*.log's
2868 # NOTE: must end in /
2869 logFolder = "/opt/onos/var/"
2870 logFiles = [ "stderr.log", "stdout.log" ]
2871 # NOTE: must end in /
2872 for f in logFiles:
2873 for node in main.nodes:
2874 dstName = main.logdir + "/" + node.name + "-" + f
2875 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2876 logFolder + f, dstName )
2877 else:
2878 main.log.debug( "skipping saving log files" )
2879
2880 main.step( "Stopping Mininet" )
2881 mnResult = main.Mininet1.stopNet()
2882 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2883 onpass="Mininet stopped",
2884 onfail="MN cleanup NOT successful" )
2885
2886 main.step( "Checking ONOS Logs for errors" )
2887 for node in main.nodes:
2888 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2889 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2890
2891 try:
2892 timerLog = open( main.logdir + "/Timers.csv", 'w')
2893 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2894 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2895 timerLog.close()
2896 except NameError, e:
2897 main.log.exception(e)
2898
2899 main.step( "Stopping webserver" )
2900 status = main.Server.stop( )
2901 utilities.assert_equals( expect=main.TRUE, actual=status,
2902 onpass="Stop Server",
2903 onfail="Failled to stop SimpleHTTPServer" )
2904 del main.Server
2905
2906 def CASE14( self, main ):
2907 """
2908 start election app on all onos nodes
2909 """
2910 import time
2911 assert main.numCtrls, "main.numCtrls not defined"
2912 assert main, "main not defined"
2913 assert utilities.assert_equals, "utilities.assert_equals not defined"
2914 assert main.CLIs, "main.CLIs not defined"
2915 assert main.nodes, "main.nodes not defined"
2916
2917 main.case("Start Leadership Election app")
2918 main.step( "Install leadership election app" )
2919 onosCli = main.CLIs[ main.activeNodes[0] ]
2920 appResult = onosCli.activateApp( "org.onosproject.election" )
2921 utilities.assert_equals(
2922 expect=main.TRUE,
2923 actual=appResult,
2924 onpass="Election app installed",
2925 onfail="Something went wrong with installing Leadership election" )
2926
2927 main.step( "Run for election on each node" )
2928 for i in main.activeNodes:
2929 main.CLIs[i].electionTestRun()
2930 time.sleep(5)
2931 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2932 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2933 utilities.assert_equals(
2934 expect=True,
2935 actual=sameResult,
2936 onpass="All nodes see the same leaderboards",
2937 onfail="Inconsistent leaderboards" )
2938
2939 if sameResult:
2940 leader = leaders[ 0 ][ 0 ]
2941 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2942 correctLeader = True
2943 else:
2944 correctLeader = False
2945 main.step( "First node was elected leader" )
2946 utilities.assert_equals(
2947 expect=True,
2948 actual=correctLeader,
2949 onpass="Correct leader was elected",
2950 onfail="Incorrect leader" )
2951
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
            15.1 Run election on each node
            15.2 Check that each node has the same leaders and candidates
            15.3 Find current leader and withdraw
            15.4 Check that a new node was elected leader
            15.5 Check that that new leader was the candidate of old leader
            15.6 Run for election on old leader
            15.7 Check that oldLeader is a candidate, and leader if only 1 node
            15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
            withdrawal, and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election app loaded none of the later steps can work
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # leaderboard entry 0 of board 0 is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported no leader at all
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # NOTE(review): oldLeaders[0] is indexed below even when the earlier
        #               consistency check failed — could IndexError; confirm
        #               consistentLeaderboards always returns non-empty boards.
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3125
3126 def CASE16( self, main ):
3127 """
3128 Install Distributed Primitives app
3129 """
3130 import time
3131 assert main.numCtrls, "main.numCtrls not defined"
3132 assert main, "main not defined"
3133 assert utilities.assert_equals, "utilities.assert_equals not defined"
3134 assert main.CLIs, "main.CLIs not defined"
3135 assert main.nodes, "main.nodes not defined"
3136
3137 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003138 main.pCounterName = "TestON-Partitions"
3139 main.pCounterValue = 0
3140 main.onosSet = set([])
3141 main.onosSetName = "TestON-set"
Jon Hall69b2b982016-05-11 12:04:59 -07003142
3143 description = "Install Primitives app"
3144 main.case( description )
3145 main.step( "Install Primitives app" )
3146 appName = "org.onosproject.distributedprimitives"
3147 node = main.activeNodes[0]
3148 appResults = main.CLIs[node].activateApp( appName )
3149 utilities.assert_equals( expect=main.TRUE,
3150 actual=appResults,
3151 onpass="Primitives app activated",
3152 onfail="Primitives app not activated" )
3153 time.sleep( 5 ) # To allow all nodes to activate
3154
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives
        """
        # Delegates to the shared HA helper so every HA test variant
        # runs the identical distributed-primitives checks.
        main.HA.CASE17( main )