"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
class HAcontinuousStopNodes:

    def __init__( self ):
        self.default = ''
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
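        # Gather the CLI and node driver handles that TestON created from the
        # topology file; stop at the first index with no matching component.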
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList, main.ONOScli1.karafUser )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short-term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAcontinuousStopNodes"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( "Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for node in main.nodes:
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Checking if ONOS is up yet" )
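        # The karaf shell can take a while to come up after a fresh install,
        # so run the full isup() check over all nodes up to twice before
        # declaring the startup failed.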
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[ main.nodes[ i ].ip_address ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
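            # NOTE: taking each controller index modulo main.numCtrls keeps
            #       this mapping valid for clusters smaller than seven nodes.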
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %.2f seconds" %
                       ( time2 - time1 ) )
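        # NOTE: the reactive forwarding app installs flows with a short
        #       timeout( 10 seconds by default ), so waiting just past that
        #       lets them expire before the host intents are added.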
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[ 0 ]
        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[ nodeNum ]
                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[ 0 ]
                hosts = main.CLIs[ node ].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
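                # NOTE: the intent store is sharded into 14 work partitions
                #       ( work-partition-0 through work-partition-13 ), and
                #       each partition should appear as a leadership topic.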
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[ i ].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[ i ].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[ i ].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
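        # Worst-case convergence bound: with one anti-entropy gossip round per
        # configured period, every active node should have seen the intents
        # within roughly one gossip period per node.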
        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
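        # CASE3 paired h8-h17 with h18-h27, so ping each hN <-> h( N + 10 )
        # pair that should be connected by a host intent.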
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[ i ]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="Intent partitions are in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            main.log.debug( onosCli.flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
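        # NOTE: each entry in ONOSIntents is a raw JSON string, so sorted()
        #       compares the character-sorted strings; this is a coarse
        #       equality check rather than a structural comparison.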
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...         ...        ...
            #  ...         ...        ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[ -1 ] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[ i ] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep( 30 )
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[ i ] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
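        # NOTE: only flow counts are compared here; per-flow counters such as
        #       life, packets, and bytes vary between queries, so a full
        #       structural comparison would report spurious differences.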
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[ i ] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
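        # Long-running background pings keep dataplane traffic flowing so that
        # later cases can check for disruption while ONOS nodes are stopped.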
        for i in range( 1, 11 ):
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source' + str( i ) ],
                target=main.params[ 'PING' ][ 'target' + str( i ) ],
                pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].devices,
                             name="devices-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].hosts,
                             name="hosts-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].ports,
                             name="ports-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].links,
                             name="links-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].clusters,
                             name="clusters-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " are inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " are inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1696
1697 main.step( "Comparing ONOS topology to MN" )
1698 devicesResults = main.TRUE
1699 linksResults = main.TRUE
1700 hostsResults = main.TRUE
1701 mnSwitches = main.Mininet1.getSwitches()
1702 mnLinks = main.Mininet1.getLinks()
1703 mnHosts = main.Mininet1.getHosts()
1704 for controller in range( len( main.activeNodes ) ):
1705 controllerStr = str( main.activeNodes[ controller ] + 1 )
1706 if devices[ controller ] and ports[ controller ] and\
1707 "Error" not in devices[ controller ] and\
1708 "Error" not in ports[ controller ]:
1709 currentDevicesResult = main.Mininet1.compareSwitches(
1710 mnSwitches,
1711 json.loads( devices[ controller ] ),
1712 json.loads( ports[ controller ] ) )
1713 else:
1714 currentDevicesResult = main.FALSE
1715 utilities.assert_equals( expect=main.TRUE,
1716 actual=currentDevicesResult,
1717 onpass="ONOS" + controllerStr +
1718 " Switches view is correct",
1719 onfail="ONOS" + controllerStr +
1720 " Switches view is incorrect" )
1721 if links[ controller ] and "Error" not in links[ controller ]:
1722 currentLinksResult = main.Mininet1.compareLinks(
1723 mnSwitches, mnLinks,
1724 json.loads( links[ controller ] ) )
1725 else:
1726 currentLinksResult = main.FALSE
1727 utilities.assert_equals( expect=main.TRUE,
1728 actual=currentLinksResult,
1729 onpass="ONOS" + controllerStr +
1730 " links view is correct",
1731 onfail="ONOS" + controllerStr +
1732 " links view is incorrect" )
1733
1734 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1735 currentHostsResult = main.Mininet1.compareHosts(
1736 mnHosts,
1737 hosts[ controller ] )
1738 else:
1739 currentHostsResult = main.FALSE
1740 utilities.assert_equals( expect=main.TRUE,
1741 actual=currentHostsResult,
1742 onpass="ONOS" + controllerStr +
1743 " hosts exist in Mininet",
1744 onfail="ONOS" + controllerStr +
1745 " hosts don't match Mininet" )
1746
1747 devicesResults = devicesResults and currentDevicesResult
1748 linksResults = linksResults and currentLinksResult
1749 hostsResults = hostsResults and currentHostsResult
1750
1751 main.step( "Device information is correct" )
1752 utilities.assert_equals(
1753 expect=main.TRUE,
1754 actual=devicesResults,
1755 onpass="Device information is correct",
1756 onfail="Device information is incorrect" )
1757
1758 main.step( "Links are correct" )
1759 utilities.assert_equals(
1760 expect=main.TRUE,
1761 actual=linksResults,
1762 onpass="Link are correct",
1763 onfail="Links are incorrect" )
1764
1765 main.step( "Hosts are correct" )
1766 utilities.assert_equals(
1767 expect=main.TRUE,
1768 actual=hostsResults,
1769 onpass="Hosts are correct",
1770 onfail="Hosts are incorrect" )
1771
1772 def CASE61( self, main ):
1773 """
1774 The Failure inducing case: stop a minority of ONOS nodes.
1775 """
1776 assert main.numCtrls, "main.numCtrls not defined"
1777 assert main, "main not defined"
1778 assert utilities.assert_equals, "utilities.assert_equals not defined"
1779 assert main.CLIs, "main.CLIs not defined"
1780 assert main.nodes, "main.nodes not defined"
1781 try:
1782 assert main.nodeIndex is not None, "main.nodeIndex not defined"
1783 assert main.killCount is not None, "main.killCount not defined"
1784 except ( AttributeError, AssertionError ):
1785 main.log.warn( "Node to kill not selected, defaulting to node 1" )
1786 main.nodeIndex = 0
1787 main.killCount = 1
1788
1789 main.case( "Stopping ONOS nodes - iteration " + str( main.killCount ) )
1790
1791 main.step( "Checking ONOS Logs for errors" )
1792 for node in main.nodes:
1793 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1794 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1795
1796 # NOTE: For now only kill one node. If we move to killing more, we need to
1797 # make sure we don't lose any partitions
1798 n = len( main.nodes ) # Number of nodes
1799 main.nodeIndex = ( main.nodeIndex + 1 ) % n
1800 main.kill = [ main.nodeIndex ] # ONOS node to kill, listed by index in main.nodes
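# NOTE: A hedged sketch ( commented out, not run by this test ) of how
#       several nodes could be selected at once while still keeping a
#       strict minority, so a Raft majority survives. Assumes main.nodes
#       holds every cluster member:
#
#     maxKill = ( len( main.nodes ) - 1 ) / 2 # largest safe minority
#     main.kill = [ ( main.nodeIndex + k ) % len( main.nodes )
#                   for k in range( maxKill ) ]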
1801
1802 # TODO: Be able to configure bringing up old node vs. a new/fresh installation
1803 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
1804 killResults = main.TRUE
1805 for i in main.kill:
1806 killResults = killResults and\
1807 main.ONOSbench.onosStop( main.nodes[ i ].ip_address )
1808 main.activeNodes.remove( i )
1809 utilities.assert_equals( expect=main.TRUE, actual=killResults,
1810 onpass="ONOS nodes stopped successfully",
1811 onfail="ONOS nodes NOT successfully stopped" )
1812
1813 main.step( "Checking ONOS nodes" )
1814 nodeResults = utilities.retry( main.HA.nodesCheck,
1815 False,
1816 args=[ main.activeNodes ],
1817 sleep=15,
1818 attempts=5 )
1819
1820 utilities.assert_equals( expect=True, actual=nodeResults,
1821 onpass="Nodes check successful",
1822 onfail="Nodes check NOT successful" )
1823
1824 if not nodeResults:
1825 for i in main.activeNodes:
1826 cli = main.CLIs[ i ]
1827 main.log.debug( "{} components not ACTIVE: \n{}".format(
1828 cli.name,
1829 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1830 main.log.error( "Failed to start ONOS, stopping test" )
1831 main.cleanup()
1832 main.exit()
1833
1834 main.killCount += 1
1835
1836 def CASE62( self, main ):
1837 """
1838 Bring up the stopped ONOS nodes
1839 """
1840 import time
1841 assert main.numCtrls, "main.numCtrls not defined"
1842 assert main, "main not defined"
1843 assert utilities.assert_equals, "utilities.assert_equals not defined"
1844 assert main.CLIs, "main.CLIs not defined"
1845 assert main.nodes, "main.nodes not defined"
1846 assert main.kill, "main.kill not defined"
1847 main.case( "Restart minority of ONOS nodes" )
1848
1849 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1850 startResults = main.TRUE
1851 restartTime = time.time()
1852 for i in main.kill:
1853 startResults = startResults and\
1854 main.ONOSbench.onosStart( main.nodes[ i ].ip_address )
1855 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1856 onpass="ONOS nodes started successfully",
1857 onfail="ONOS nodes NOT successfully started" )
1858
1859 main.step( "Checking if ONOS is up yet" )
1860 count = 0
1861 onosIsupResult = main.FALSE
1862 while onosIsupResult == main.FALSE and count < 10:
1863 onosIsupResult = main.TRUE
1864 for i in main.kill:
1865 onosIsupResult = onosIsupResult and\
1866 main.ONOSbench.isup( main.nodes[ i ].ip_address )
1867 count = count + 1
1868 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1869 onpass="ONOS restarted successfully",
1870 onfail="ONOS restart NOT successful" )
1871
1872 main.step( "Restarting ONOS main.CLIs" )
1873 cliResults = main.TRUE
1874 for i in main.kill:
1875 cliResults = cliResults and\
1876 main.CLIs[ i ].startOnosCli( main.nodes[ i ].ip_address )
1877 main.activeNodes.append( i )
1878 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1879 onpass="ONOS cli restarted",
1880 onfail="ONOS cli did not restart" )
1881 main.activeNodes.sort()
1882 try:
1883 assert sorted( set( main.activeNodes ) ) == main.activeNodes,\
1884 "List of active nodes has duplicates, this likely indicates something was run out of order"
1885 except AssertionError:
1886 main.log.exception( "" )
1887 main.cleanup()
1888 main.exit()
1889
1890 # Record how long the restart took so we can check how long the
1891 # gossip protocol has had time to work
1892 main.restartTime = time.time() - restartTime
1893 main.log.debug( "Restart time: " + str( main.restartTime ) )
1894
1895 main.step( "Checking ONOS nodes" )
1896 nodeResults = utilities.retry( main.HA.nodesCheck,
1897 False,
1898 args=[ main.activeNodes ],
1899 sleep=15,
1900 attempts=5 )
1901
1902 utilities.assert_equals( expect=True, actual=nodeResults,
1903 onpass="Nodes check successful",
1904 onfail="Nodes check NOT successful" )
1905
1906 if not nodeResults:
1907 for i in main.activeNodes:
1908 cli = main.CLIs[ i ]
1909 main.log.debug( "{} components not ACTIVE: \n{}".format(
1910 cli.name,
1911 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1912 main.log.error( "Failed to start ONOS, stopping test" )
1913 main.cleanup()
1914 main.exit()
1915 node = main.activeNodes[ 0 ]
1916 main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
1917 main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
1918 main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
1919 main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
1920
1921 main.step( "Rerun for election on the node(s) that were killed" )
1922 runResults = main.TRUE
1923 for i in main.kill:
1924 runResults = runResults and\
1925 main.CLIs[ i ].electionTestRun()
1926 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1927 onpass="ONOS nodes reran for election topic",
1928 onfail="Errror rerunning for election" )
1929
1930 def CASE7( self, main ):
1931 """
1932 Check state after ONOS failure
1933 """
1934 import json
1935 assert main.numCtrls, "main.numCtrls not defined"
1936 assert main, "main not defined"
1937 assert utilities.assert_equals, "utilities.assert_equals not defined"
1938 assert main.CLIs, "main.CLIs not defined"
1939 assert main.nodes, "main.nodes not defined"
1940 try:
1941 main.kill
1942 except AttributeError:
1943 main.kill = []
1944
1945 main.case( "Running ONOS Constant State Tests" )
1946
1947 main.step( "Check that each switch has a master" )
1948 # Assert that each device has a master
1949 rolesNotNull = main.TRUE
1950 threads = []
1951 for i in main.activeNodes:
1952 t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
1953 name="rolesNotNull-" + str( i ),
1954 args=[] )
1955 threads.append( t )
1956 t.start()
1957
1958 for t in threads:
1959 t.join()
1960 rolesNotNull = rolesNotNull and t.result
1961 utilities.assert_equals(
1962 expect=main.TRUE,
1963 actual=rolesNotNull,
1964 onpass="Each device has a master",
1965 onfail="Some devices don't have a master assigned" )
1966
1967 main.step( "Read device roles from ONOS" )
1968 ONOSMastership = []
1969 mastershipCheck = main.FALSE
1970 consistentMastership = True
1971 rolesResults = True
1972 threads = []
1973 for i in main.activeNodes:
1974 t = main.Thread( target=main.CLIs[ i ].roles,
1975 name="roles-" + str( i ),
1976 args=[] )
1977 threads.append( t )
1978 t.start()
1979
1980 for t in threads:
1981 t.join()
1982 ONOSMastership.append( t.result )
1983
1984 for i in range( len( ONOSMastership ) ):
1985 node = str( main.activeNodes[ i ] + 1 )
1986 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
1987 main.log.error( "Error in getting ONOS" + node + " roles" )
1988 main.log.warn( "ONOS" + node + " mastership response: " +
1989 repr( ONOSMastership[ i ] ) )
1990 rolesResults = False
1991 utilities.assert_equals(
1992 expect=True,
1993 actual=rolesResults,
1994 onpass="No error in reading roles output",
1995 onfail="Error in reading roles from ONOS" )
1996
1997 main.step( "Check for consistency in roles from each controller" )
1998 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1999 main.log.info(
2000 "Switch roles are consistent across all ONOS nodes" )
2001 else:
2002 consistentMastership = False
2003 utilities.assert_equals(
2004 expect=True,
2005 actual=consistentMastership,
2006 onpass="Switch roles are consistent across all ONOS nodes",
2007 onfail="ONOS nodes have different views of switch roles" )
2008
2009 if rolesResults and not consistentMastership:
2010 for i in range( len( ONOSMastership ) ):
2011 node = str( main.activeNodes[ i ] + 1 )
2012 main.log.warn( "ONOS" + node + " roles: ",
2013 json.dumps( json.loads( ONOSMastership[ i ] ),
2014 sort_keys=True,
2015 indent=4,
2016 separators=( ',', ': ' ) ) )
2017
2018 # NOTE: we expect mastership to change on controller failure
2019
2020 main.step( "Get the intents and compare across all nodes" )
2021 ONOSIntents = []
2022 intentCheck = main.FALSE
2023 consistentIntents = True
2024 intentsResults = True
2025 threads = []
2026 for i in main.activeNodes:
2027 t = main.Thread( target=main.CLIs[ i ].intents,
2028 name="intents-" + str( i ),
2029 args=[],
2030 kwargs={ 'jsonFormat': True } )
2031 threads.append( t )
2032 t.start()
2033
2034 for t in threads:
2035 t.join()
2036 ONOSIntents.append( t.result )
2037
2038 for i in range( len( ONOSIntents ) ):
2039 node = str( main.activeNodes[ i ] + 1 )
2040 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
2041 main.log.error( "Error in getting ONOS" + node + " intents" )
2042 main.log.warn( "ONOS" + node + " intents response: " +
2043 repr( ONOSIntents[ i ] ) )
2044 intentsResults = False
2045 utilities.assert_equals(
2046 expect=True,
2047 actual=intentsResults,
2048 onpass="No error in reading intents output",
2049 onfail="Error in reading intents from ONOS" )
2050
2051 main.step( "Check for consistency in Intents from each controller" )
2052 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2053 main.log.info( "Intents are consistent across all ONOS " +
2054 "nodes" )
2055 else:
2056 consistentIntents = False
2057
2058 # Try to make it easy to figure out what is happening
2059 #
2060 # Intent ONOS1 ONOS2 ...
2061 # 0x01 INSTALLED INSTALLING
2062 # ... ... ...
2063 # ... ... ...
2064 title = " ID"
2065 for n in main.activeNodes:
2066 title += " " * 10 + "ONOS" + str( n + 1 )
2067 main.log.warn( title )
2068 # get all intent keys in the cluster
2069 keys = []
2070 for nodeStr in ONOSIntents:
2071 node = json.loads( nodeStr )
2072 for intent in node:
2073 keys.append( intent.get( 'id' ) )
2074 keys = set( keys )
2075 for key in keys:
2076 row = "%-13s" % key
2077 for nodeStr in ONOSIntents:
2078 node = json.loads( nodeStr )
2079 for intent in node:
2080 if intent.get( 'id' ) == key:
2081 row += "%-15s" % intent.get( 'state' )
2082 main.log.warn( row )
2083 # End table view
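# NOTE: If an intent id is missing on some node, the row above simply
#       skips that column, which misaligns the table. A hedged tweak
#       ( a sketch replacing the inner loop body ):
#
#     state = "-"
#     for intent in node:
#         if intent.get( 'id' ) == key:
#             state = intent.get( 'state' )
#     row += "%-15s" % state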
2084
2085 utilities.assert_equals(
2086 expect=True,
2087 actual=consistentIntents,
2088 onpass="Intents are consistent across all ONOS nodes",
2089 onfail="ONOS nodes have different views of intents" )
2090 intentStates = []
2091 for node in ONOSIntents: # Iter through ONOS nodes
2092 nodeStates = []
2093 # Iter through intents of a node
2094 try:
2095 for intent in json.loads( node ):
2096 nodeStates.append( intent[ 'state' ] )
2097 except ( ValueError, TypeError ):
2098 main.log.exception( "Error in parsing intents" )
2099 main.log.error( repr( node ) )
2100 intentStates.append( nodeStates )
2101 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2102 main.log.info( dict( out ) )
2103
2104 if intentsResults and not consistentIntents:
2105 for i in range( len( main.activeNodes ) ):
2106 node = str( main.activeNodes[ i ] + 1 )
2107 main.log.warn( "ONOS" + node + " intents: " )
2108 main.log.warn( json.dumps(
2109 json.loads( ONOSIntents[ i ] ),
2110 sort_keys=True,
2111 indent=4,
2112 separators=( ',', ': ' ) ) )
2113 elif intentsResults and consistentIntents:
2114 intentCheck = main.TRUE
2115
2116 # NOTE: Store has no durability, so intents are lost across system
2117 # restarts
2118 main.step( "Compare current intents with intents before the failure" )
2119 # NOTE: this requires case 5 to pass for intentState to be set.
2120 # maybe we should stop the test if that fails?
2121 sameIntents = main.FALSE
2122 try:
2123 intentState
2124 except NameError:
2125 main.log.warn( "No previous intent state was saved" )
2126 else:
2127 if intentState and intentState == ONOSIntents[ 0 ]:
2128 sameIntents = main.TRUE
2129 main.log.info( "Intents are consistent with before failure" )
2130 # TODO: possibly the states have changed? we may need to figure out
2131 # what the acceptable states are
2132 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2133 sameIntents = main.TRUE
2134 try:
2135 before = json.loads( intentState )
2136 after = json.loads( ONOSIntents[ 0 ] )
2137 for intent in before:
2138 if intent not in after:
2139 sameIntents = main.FALSE
2140 main.log.debug( "Intent is not currently in ONOS " +
2141 "(at least in the same form):" )
2142 main.log.debug( json.dumps( intent ) )
2143 except ( ValueError, TypeError ):
2144 main.log.exception( "Exception printing intents" )
2145 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2146 main.log.debug( repr( intentState ) )
2147 if sameIntents == main.FALSE:
2148 try:
2149 main.log.debug( "ONOS intents before: " )
2150 main.log.debug( json.dumps( json.loads( intentState ),
2151 sort_keys=True, indent=4,
2152 separators=( ',', ': ' ) ) )
2153 main.log.debug( "Current ONOS intents: " )
2154 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2155 sort_keys=True, indent=4,
2156 separators=( ',', ': ' ) ) )
2157 except ( ValueError, TypeError ):
2158 main.log.exception( "Exception printing intents" )
2159 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2160 main.log.debug( repr( intentState ) )
2161 utilities.assert_equals(
2162 expect=main.TRUE,
2163 actual=sameIntents,
2164 onpass="Intents are consistent with before failure",
2165 onfail="The Intents changed during failure" )
2166 intentCheck = intentCheck and sameIntents
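# NOTE: Per the TODO above, intent states may legitimately differ after a
#       failure. A hedged sketch that compares intents on everything
#       except the volatile 'state' field:
#
#     def stripState( intentsJson ):
#         intents = json.loads( intentsJson )
#         for item in intents:
#             item.pop( 'state', None )
#         return sorted( intents )
#
#     sameIgnoringState = stripState( intentState ) == stripState( ONOSIntents[ 0 ] )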
2167
2168 main.step( "Get the OF Table entries and compare to before " +
2169 "component failure" )
2170 FlowTables = main.TRUE
2171 for i in range( 28 ):
2172 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2173 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2174 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2175 FlowTables = FlowTables and curSwitch
2176 if curSwitch == main.FALSE:
2177 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2178 utilities.assert_equals(
2179 expect=main.TRUE,
2180 actual=FlowTables,
2181 onpass="No changes were found in the flow tables",
2182 onfail="Changes were found in the flow tables" )
2183
2184 main.Mininet2.pingLongKill()
2185 """
2186 main.step( "Check the continuous pings to ensure that no packets " +
2187 "were dropped during component failure" )
2188 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2189 main.params[ 'TESTONIP' ] )
2190 LossInPings = main.FALSE
2191 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2192 for i in range( 8, 18 ):
2193 main.log.info(
2194 "Checking for a loss in pings along flow from s" +
2195 str( i ) )
2196 LossInPings = main.Mininet2.checkForLoss(
2197 "/tmp/ping.h" +
2198 str( i ) ) or LossInPings
2199 if LossInPings == main.TRUE:
2200 main.log.info( "Loss in ping detected" )
2201 elif LossInPings == main.ERROR:
2202 main.log.info( "There are multiple mininet process running" )
2203 elif LossInPings == main.FALSE:
2204 main.log.info( "No Loss in the pings" )
2205 main.log.info( "No loss of dataplane connectivity" )
2206 utilities.assert_equals(
2207 expect=main.FALSE,
2208 actual=LossInPings,
2209 onpass="No Loss of connectivity",
2210 onfail="Loss of dataplane connectivity detected" )
2211 """
2212 main.step( "Leadership Election is still functional" )
2213 # Test of LeadershipElection
2214 leaderList = []
2215
2216 restarted = []
2217 for i in main.kill:
2218 restarted.append( main.nodes[ i ].ip_address )
2219 leaderResult = main.TRUE
2220
2221 for i in main.activeNodes:
2222 cli = main.CLIs[ i ]
2223 leaderN = cli.electionTestLeader()
2224 leaderList.append( leaderN )
2225 if leaderN == main.FALSE:
2226 # error in response
2227 main.log.error( "Something is wrong with " +
2228 "electionTestLeader function, check the" +
2229 " error logs" )
2230 leaderResult = main.FALSE
2231 elif leaderN is None:
2232 main.log.error( cli.name +
2233 " shows no leader for the election-app was" +
2234 " elected after the old one died" )
2235 leaderResult = main.FALSE
2236 elif leaderN in restarted:
2237 main.log.error( cli.name + " shows " + str( leaderN ) +
2238 " as leader for the election-app, but it " +
2239 "was restarted" )
2240 leaderResult = main.FALSE
2241 if len( set( leaderList ) ) != 1:
2242 leaderResult = main.FALSE
2243 main.log.error(
2244 "Inconsistent view of leader for the election test app" )
2245 main.log.debug( "Leader list: " + str( leaderList ) )
2246 utilities.assert_equals(
2247 expect=main.TRUE,
2248 actual=leaderResult,
2249 onpass="Leadership election passed",
2250 onfail="Something went wrong with Leadership election" )
2251
2252 def CASE8( self, main ):
2253 """
2254 Compare topo
2255 """
2256 import json
2257 import time
2258 assert main.numCtrls, "main.numCtrls not defined"
2259 assert main, "main not defined"
2260 assert utilities.assert_equals, "utilities.assert_equals not defined"
2261 assert main.CLIs, "main.CLIs not defined"
2262 assert main.nodes, "main.nodes not defined"
2263
2264 main.case( "Compare ONOS Topology view to Mininet topology" )
2265 main.caseExplanation = "Compare topology objects between Mininet" +\
2266 " and ONOS"
2267 topoResult = main.FALSE
2268 topoFailMsg = "ONOS topology don't match Mininet"
2269 elapsed = 0
2270 count = 0
2271 main.step( "Comparing ONOS topology to MN topology" )
2272 startTime = time.time()
2273 # Give time for Gossip to work
2274 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2275 devicesResults = main.TRUE
2276 linksResults = main.TRUE
2277 hostsResults = main.TRUE
2278 hostAttachmentResults = True
2279 count += 1
2280 cliStart = time.time()
2281 devices = []
2282 threads = []
2283 for i in main.activeNodes:
2284 t = main.Thread( target=utilities.retry,
2285 name="devices-" + str( i ),
2286 args=[ main.CLIs[ i ].devices, [ None ] ],
2287 kwargs={ 'sleep': 5, 'attempts': 5,
2288 'randomTime': True } )
2289 threads.append( t )
2290 t.start()
2291
2292 for t in threads:
2293 t.join()
2294 devices.append( t.result )
2295 hosts = []
2296 ipResult = main.TRUE
2297 threads = []
2298 for i in main.activeNodes:
2299 t = main.Thread( target=utilities.retry,
2300 name="hosts-" + str( i ),
2301 args=[ main.CLIs[ i ].hosts, [ None ] ],
2302 kwargs={ 'sleep': 5, 'attempts': 5,
2303 'randomTime': True } )
2304 threads.append( t )
2305 t.start()
2306
2307 for t in threads:
2308 t.join()
2309 try:
2310 hosts.append( json.loads( t.result ) )
2311 except ( ValueError, TypeError ):
2312 main.log.exception( "Error parsing hosts results" )
2313 main.log.error( repr( t.result ) )
2314 hosts.append( None )
2315 for controller in range( 0, len( hosts ) ):
2316 controllerStr = str( main.activeNodes[ controller ] + 1 )
2317 if hosts[ controller ]:
2318 for host in hosts[ controller ]:
2319 if host is None or host.get( 'ipAddresses', [] ) == []:
2320 main.log.error(
2321 "Error with host ipAddresses on controller" +
2322 controllerStr + ": " + str( host ) )
2323 ipResult = main.FALSE
2324 ports = []
2325 threads = []
2326 for i in main.activeNodes:
2327 t = main.Thread( target=utilities.retry,
2328 name="ports-" + str( i ),
2329 args=[ main.CLIs[ i ].ports, [ None ] ],
2330 kwargs={ 'sleep': 5, 'attempts': 5,
2331 'randomTime': True } )
2332 threads.append( t )
2333 t.start()
2334
2335 for t in threads:
2336 t.join()
2337 ports.append( t.result )
2338 links = []
2339 threads = []
2340 for i in main.activeNodes:
2341 t = main.Thread( target=utilities.retry,
2342 name="links-" + str( i ),
2343 args=[ main.CLIs[ i ].links, [ None ] ],
2344 kwargs={ 'sleep': 5, 'attempts': 5,
2345 'randomTime': True } )
2346 threads.append( t )
2347 t.start()
2348
2349 for t in threads:
2350 t.join()
2351 links.append( t.result )
2352 clusters = []
2353 threads = []
2354 for i in main.activeNodes:
2355 t = main.Thread( target=utilities.retry,
2356 name="clusters-" + str( i ),
2357 args=[ main.CLIs[ i ].clusters, [ None ] ],
2358 kwargs={ 'sleep': 5, 'attempts': 5,
2359 'randomTime': True } )
2360 threads.append( t )
2361 t.start()
2362
2363 for t in threads:
2364 t.join()
2365 clusters.append( t.result )
2366
2367 elapsed = time.time() - startTime
2368 cliTime = time.time() - cliStart
2369 print "Elapsed time: " + str( elapsed )
2370 print "CLI time: " + str( cliTime )
2371
2372 if all( e is None for e in devices ) and\
2373 all( e is None for e in hosts ) and\
2374 all( e is None for e in ports ) and\
2375 all( e is None for e in links ) and\
2376 all( e is None for e in clusters ):
2377 topoFailMsg = "Could not get topology from ONOS"
2378 main.log.error( topoFailMsg )
2379 continue # Try again, No use trying to compare
2380
2381 mnSwitches = main.Mininet1.getSwitches()
2382 mnLinks = main.Mininet1.getLinks()
2383 mnHosts = main.Mininet1.getHosts()
2384 for controller in range( len( main.activeNodes ) ):
2385 controllerStr = str( main.activeNodes[ controller ] + 1 )
2386 if devices[ controller ] and ports[ controller ] and\
2387 "Error" not in devices[ controller ] and\
2388 "Error" not in ports[ controller ]:
2389
2390 try:
2391 currentDevicesResult = main.Mininet1.compareSwitches(
2392 mnSwitches,
2393 json.loads( devices[ controller ] ),
2394 json.loads( ports[ controller ] ) )
2395 except ( TypeError, ValueError ):
2396 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2397 devices[ controller ], ports[ controller ] ) )
currentDevicesResult = main.FALSE
2398 else:
2399 currentDevicesResult = main.FALSE
2400 utilities.assert_equals( expect=main.TRUE,
2401 actual=currentDevicesResult,
2402 onpass="ONOS" + controllerStr +
2403 " Switches view is correct",
2404 onfail="ONOS" + controllerStr +
2405 " Switches view is incorrect" )
2406
2407 if links[ controller ] and "Error" not in links[ controller ]:
2408 currentLinksResult = main.Mininet1.compareLinks(
2409 mnSwitches, mnLinks,
2410 json.loads( links[ controller ] ) )
2411 else:
2412 currentLinksResult = main.FALSE
2413 utilities.assert_equals( expect=main.TRUE,
2414 actual=currentLinksResult,
2415 onpass="ONOS" + controllerStr +
2416 " links view is correct",
2417 onfail="ONOS" + controllerStr +
2418 " links view is incorrect" )
2419 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2420 currentHostsResult = main.Mininet1.compareHosts(
2421 mnHosts,
2422 hosts[ controller ] )
2423 elif hosts[ controller ] == []:
2424 currentHostsResult = main.TRUE
2425 else:
2426 currentHostsResult = main.FALSE
2427 utilities.assert_equals( expect=main.TRUE,
2428 actual=currentHostsResult,
2429 onpass="ONOS" + controllerStr +
2430 " hosts exist in Mininet",
2431 onfail="ONOS" + controllerStr +
2432 " hosts don't match Mininet" )
2433 # CHECKING HOST ATTACHMENT POINTS
2434 hostAttachment = True
2435 zeroHosts = False
2436 # FIXME: topo-HA/obelisk specific mappings:
2437 # key is mac and value is dpid
2438 mappings = {}
2439 for i in range( 1, 29 ): # hosts 1 through 28
2440 # set up correct variables:
2441 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
2442 if i == 1:
2443 deviceId = "1000".zfill( 16 )
2444 elif i == 2:
2445 deviceId = "2000".zfill( 16 )
2446 elif i == 3:
2447 deviceId = "3000".zfill( 16 )
2448 elif i == 4:
2449 deviceId = "3004".zfill( 16 )
2450 elif i == 5:
2451 deviceId = "5000".zfill( 16 )
2452 elif i == 6:
2453 deviceId = "6000".zfill( 16 )
2454 elif i == 7:
2455 deviceId = "6007".zfill( 16 )
2456 elif i >= 8 and i <= 17:
2457 dpid = '3' + str( i ).zfill( 3 )
2458 deviceId = dpid.zfill( 16 )
2459 elif i >= 18 and i <= 27:
2460 dpid = '6' + str( i ).zfill( 3 )
2461 deviceId = dpid.zfill( 16 )
2462 elif i == 28:
2463 deviceId = "2800".zfill( 16 )
2464 mappings[ macId ] = deviceId
2465 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2466 if hosts[ controller ] == []:
2467 main.log.warn( "There are no hosts discovered" )
2468 zeroHosts = True
2469 else:
2470 for host in hosts[ controller ]:
2471 mac = None
2472 location = None
2473 device = None
2474 port = None
2475 try:
2476 mac = host.get( 'mac' )
2477 assert mac, "mac field could not be found for this host object"
2478
2479 location = host.get( 'locations' )[ 0 ]
2480 assert location, "location field could not be found for this host object"
2481
2482 # Trim the protocol identifier off deviceId
2483 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
2484 assert device, "elementId field could not be found for this host location object"
2485
2486 port = location.get( 'port' )
2487 assert port, "port field could not be found for this host location object"
2488
2489 # Now check if this matches where they should be
2490 if mac and device and port:
2491 if str( port ) != "1":
2492 main.log.error( "The attachment port is incorrect for " +
2493 "host " + str( mac ) +
2494 ". Expected: 1 Actual: " + str( port ) )
2495 hostAttachment = False
2496 if device != mappings[ str( mac ) ]:
2497 main.log.error( "The attachment device is incorrect for " +
2498 "host " + str( mac ) +
2499 ". Expected: " + mappings[ str( mac ) ] +
2500 " Actual: " + device )
2501 hostAttachment = False
2502 else:
2503 hostAttachment = False
2504 except ( AssertionError, TypeError ):
2505 main.log.exception( "Json object not as expected" )
2506 main.log.error( repr( host ) )
2507 hostAttachment = False
2508 else:
2509 main.log.error( "No hosts json output or \"Error\"" +
2510 " in output. hosts = " +
2511 repr( hosts[ controller ] ) )
2512 if zeroHosts is True:
2513 hostAttachment = True # nothing to check; don't override per-host results
2514
2515 # END CHECKING HOST ATTACHMENT POINTS
2516 devicesResults = devicesResults and currentDevicesResult
2517 linksResults = linksResults and currentLinksResult
2518 hostsResults = hostsResults and currentHostsResult
2519 hostAttachmentResults = hostAttachmentResults and\
2520 hostAttachment
2521 topoResult = ( devicesResults and linksResults
2522 and hostsResults and ipResult and
2523 hostAttachmentResults )
2524 utilities.assert_equals( expect=True,
2525 actual=topoResult,
2526 onpass="ONOS topology matches Mininet",
2527 onfail=topoFailMsg )
2528 # End of While loop to pull ONOS state
2529
2530 # Compare json objects for hosts and dataplane clusters
2531
2532 # hosts
2533 main.step( "Hosts view is consistent across all ONOS nodes" )
2534 consistentHostsResult = main.TRUE
2535 for controller in range( len( hosts ) ):
2536 controllerStr = str( main.activeNodes[ controller ] + 1 )
2537 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2538 if hosts[ controller ] == hosts[ 0 ]:
2539 continue
2540 else: # hosts not consistent
2541 main.log.error( "hosts from ONOS" + controllerStr +
2542 " is inconsistent with ONOS1" )
2543 main.log.warn( repr( hosts[ controller ] ) )
2544 consistentHostsResult = main.FALSE
2545
2546 else:
2547 main.log.error( "Error in getting ONOS hosts from ONOS" +
2548 controllerStr )
2549 consistentHostsResult = main.FALSE
2550 main.log.warn( "ONOS" + controllerStr +
2551 " hosts response: " +
2552 repr( hosts[ controller ] ) )
2553 utilities.assert_equals(
2554 expect=main.TRUE,
2555 actual=consistentHostsResult,
2556 onpass="Hosts view is consistent across all ONOS nodes",
2557 onfail="ONOS nodes have different views of hosts" )
2558
2559 main.step( "Hosts information is correct" )
2560 hostsResults = hostsResults and ipResult
2561 utilities.assert_equals(
2562 expect=main.TRUE,
2563 actual=hostsResults,
2564 onpass="Host information is correct",
2565 onfail="Host information is incorrect" )
2566
2567 main.step( "Host attachment points to the network" )
2568 utilities.assert_equals(
2569 expect=True,
2570 actual=hostAttachmentResults,
2571 onpass="Hosts are correctly attached to the network",
2572 onfail="ONOS did not correctly attach hosts to the network" )
2573
2574 # Strongly connected clusters of devices
2575 main.step( "Clusters view is consistent across all ONOS nodes" )
2576 consistentClustersResult = main.TRUE
2577 for controller in range( len( clusters ) ):
2578 controllerStr = str( main.activeNodes[ controller ] + 1 )
2579 if "Error" not in clusters[ controller ]:
2580 if clusters[ controller ] == clusters[ 0 ]:
2581 continue
2582 else: # clusters not consistent
2583 main.log.error( "clusters from ONOS" +
2584 controllerStr +
2585 " is inconsistent with ONOS1" )
2586 consistentClustersResult = main.FALSE
2587 else:
2588 main.log.error( "Error in getting dataplane clusters " +
2589 "from ONOS" + controllerStr )
2590 consistentClustersResult = main.FALSE
2591 main.log.warn( "ONOS" + controllerStr +
2592 " clusters response: " +
2593 repr( clusters[ controller ] ) )
2594 utilities.assert_equals(
2595 expect=main.TRUE,
2596 actual=consistentClustersResult,
2597 onpass="Clusters view is consistent across all ONOS nodes",
2598 onfail="ONOS nodes have different views of clusters" )
2599 if not consistentClustersResult:
2600 main.log.debug( clusters )
2601
2602 main.step( "There is only one SCC" )
2603 # there should always only be one cluster
clusterResults = main.FALSE
2604 try:
2605 numClusters = len( json.loads( clusters[ 0 ] ) )
2606 except ( ValueError, TypeError ):
2607 main.log.exception( "Error parsing clusters[0]: " +
2608 repr( clusters[ 0 ] ) )
2609 numClusters = "ERROR"
2610 clusterResults = main.FALSE
2611 if numClusters == 1:
2612 clusterResults = main.TRUE
2613 utilities.assert_equals(
2614 expect=1,
2615 actual=numClusters,
2616 onpass="ONOS shows 1 SCC",
2617 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2618
2619 topoResult = ( devicesResults and linksResults
2620 and hostsResults and consistentHostsResult
2621 and consistentClustersResult and clusterResults
2622 and ipResult and hostAttachmentResults )
2623
2624 topoResult = topoResult and ( count <= 2 )
2625 note = "note it takes about " + str( int( cliTime ) ) + \
2626 " seconds for the test to make all the cli calls to fetch " +\
2627 "the topology from each ONOS instance"
2628 main.log.info(
2629 "Very crass estimate for topology discovery/convergence( " +
2630 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2631 str( count ) + " tries" )
2632
2633 main.step( "Device information is correct" )
2634 utilities.assert_equals(
2635 expect=main.TRUE,
2636 actual=devicesResults,
2637 onpass="Device information is correct",
2638 onfail="Device information is incorrect" )
2639
2640 main.step( "Links are correct" )
2641 utilities.assert_equals(
2642 expect=main.TRUE,
2643 actual=linksResults,
2644 onpass="Link are correct",
2645 onfail="Links are incorrect" )
2646
2647 main.step( "Hosts are correct" )
2648 utilities.assert_equals(
2649 expect=main.TRUE,
2650 actual=hostsResults,
2651 onpass="Hosts are correct",
2652 onfail="Hosts are incorrect" )
2653
2654 # FIXME: move this to an ONOS state case
2655 main.step( "Checking ONOS nodes" )
2656 nodeResults = utilities.retry( main.HA.nodesCheck,
2657 False,
2658 args=[ main.activeNodes ],
2659 attempts=5 )
2660
2661 utilities.assert_equals( expect=True, actual=nodeResults,
2662 onpass="Nodes check successful",
2663 onfail="Nodes check NOT successful" )
2664 if not nodeResults:
2665 for i in main.activeNodes:
2666 main.log.debug( "{} components not ACTIVE: \n{}".format(
2667 main.CLIs[ i ].name,
2668 main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
2669
2670 if not topoResult:
2671 main.cleanup()
2672 main.exit()
2673
2674 def CASE9( self, main ):
2675 """
2676 Link s3-s28 down
2677 """
2678 import time
2679 assert main.numCtrls, "main.numCtrls not defined"
2680 assert main, "main not defined"
2681 assert utilities.assert_equals, "utilities.assert_equals not defined"
2682 assert main.CLIs, "main.CLIs not defined"
2683 assert main.nodes, "main.nodes not defined"
2684 # NOTE: You should probably run a topology check after this
2685
2686 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2687
2688 description = "Turn off a link to ensure that Link Discovery " +\
2689 "is working properly"
2690 main.case( description )
2691
2692 main.step( "Kill Link between s3 and s28" )
2693 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2694 main.log.info( "Waiting " + str( linkSleep ) +
2695 " seconds for link down to be discovered" )
2696 time.sleep( linkSleep )
2697 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2698 onpass="Link down successful",
2699 onfail="Failed to bring link down" )
2700 # TODO do some sort of check here
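# A hedged sketch of such a check ( not run here ): ask one active node
# for its link count and expect it to drop by 2, since ONOS reports one
# link per direction:
#
#     onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
#     onosLinks = json.loads( onosCli.links() )
#     main.log.info( "ONOS now sees " + str( len( onosLinks ) ) + " links" )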
2701
2702 def CASE10( self, main ):
2703 """
2704 Link s3-s28 up
2705 """
2706 import time
2707 assert main.numCtrls, "main.numCtrls not defined"
2708 assert main, "main not defined"
2709 assert utilities.assert_equals, "utilities.assert_equals not defined"
2710 assert main.CLIs, "main.CLIs not defined"
2711 assert main.nodes, "main.nodes not defined"
2712 # NOTE: You should probably run a topology check after this
2713
2714 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2715
2716 description = "Restore a link to ensure that Link Discovery is " + \
2717 "working properly"
2718 main.case( description )
2719
2720 main.step( "Bring link between s3 and s28 back up" )
2721 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2722 main.log.info( "Waiting " + str( linkSleep ) +
2723 " seconds for link up to be discovered" )
2724 time.sleep( linkSleep )
2725 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2726 onpass="Link up successful",
2727 onfail="Failed to bring link up" )
2728 # TODO do some sort of check here
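# A hedged sketch of the matching check ( not run here ): the link count
# reported by ONOS should return to its pre-failure value:
#
#     onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
#     onosLinks = json.loads( onosCli.links() )
#     main.log.info( "ONOS sees " + str( len( onosLinks ) ) + " links after restore" )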
2729
2730 def CASE11( self, main ):
2731 """
2732 Switch Down
2733 """
2734 # NOTE: You should probably run a topology check after this
2735 import time
2736 assert main.numCtrls, "main.numCtrls not defined"
2737 assert main, "main not defined"
2738 assert utilities.assert_equals, "utilities.assert_equals not defined"
2739 assert main.CLIs, "main.CLIs not defined"
2740 assert main.nodes, "main.nodes not defined"
2741
2742 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2743
2744 description = "Killing a switch to ensure it is discovered correctly"
2745 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
2746 main.case( description )
2747 switch = main.params[ 'kill' ][ 'switch' ]
2748 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2749
2750 # NOTE: the switch to kill and its dpid are read from main.params above
2751 main.step( "Kill " + switch )
2752 main.log.info( "Deleting " + switch )
2753 main.Mininet1.delSwitch( switch )
2754 main.log.info( "Waiting " + str( switchSleep ) +
2755 " seconds for switch down to be discovered" )
2756 time.sleep( switchSleep )
2757 device = onosCli.getDevice( dpid=switchDPID )
2758 # Peek at the deleted switch
2759 main.log.warn( str( device ) )
2760 result = main.FALSE
2761 if device and device[ 'available' ] is False:
2762 result = main.TRUE
2763 utilities.assert_equals( expect=main.TRUE, actual=result,
2764 onpass="Kill switch successful",
2765 onfail="Failed to kill switch?" )
2766
2767 def CASE12( self, main ):
2768 """
2769 Switch Up
2770 """
2771 # NOTE: You should probably run a topology check after this
2772 import time
2773 assert main.numCtrls, "main.numCtrls not defined"
2774 assert main, "main not defined"
2775 assert utilities.assert_equals, "utilities.assert_equals not defined"
2776 assert main.CLIs, "main.CLIs not defined"
2777 assert main.nodes, "main.nodes not defined"
2778 assert ONOS1Port, "ONOS1Port not defined"
2779 assert ONOS2Port, "ONOS2Port not defined"
2780 assert ONOS3Port, "ONOS3Port not defined"
2781 assert ONOS4Port, "ONOS4Port not defined"
2782 assert ONOS5Port, "ONOS5Port not defined"
2783 assert ONOS6Port, "ONOS6Port not defined"
2784 assert ONOS7Port, "ONOS7Port not defined"
2785
2786 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2787 switch = main.params[ 'kill' ][ 'switch' ]
2788 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2789 links = main.params[ 'kill' ][ 'links' ].split()
2790 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
2791 description = "Adding a switch to ensure it is discovered correctly"
2792 main.case( description )
2793
2794 main.step( "Add back " + switch )
2795 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2796 for peer in links:
2797 main.Mininet1.addLink( switch, peer )
2798 ipList = [ node.ip_address for node in main.nodes ]
2799 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2800 main.log.info( "Waiting " + str( switchSleep ) +
2801 " seconds for switch up to be discovered" )
2802 time.sleep( switchSleep )
2803 device = onosCli.getDevice( dpid=switchDPID )
2804 # Peek at the deleted switch
2805 main.log.warn( str( device ) )
2806 result = main.FALSE
2807 if device and device[ 'available' ]:
2808 result = main.TRUE
2809 utilities.assert_equals( expect=main.TRUE, actual=result,
2810 onpass="add switch successful",
2811 onfail="Failed to add switch?" )
2812
2813 def CASE13( self, main ):
2814 """
2815 Clean up
2816 """
2817 import os
2818 import time
2819 assert main.numCtrls, "main.numCtrls not defined"
2820 assert main, "main not defined"
2821 assert utilities.assert_equals, "utilities.assert_equals not defined"
2822 assert main.CLIs, "main.CLIs not defined"
2823 assert main.nodes, "main.nodes not defined"
2824
2825 # printing colors to terminal
2826 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2827 'blue': '\033[94m', 'green': '\033[92m',
2828 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
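# The colors dict above can be used to colorize terminal output; a usage
# sketch ( hypothetical, not used below ):
#
#     print colors[ 'green' ] + "Cleanup finished" + colors[ 'end' ]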
2829 main.case( "Test Cleanup" )
2830 main.step( "Killing tcpdumps" )
2831 main.Mininet2.stopTcpdump()
2832
2833 testname = main.TEST
2834 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2835 main.step( "Copying MN pcap and ONOS log files to test station" )
2836 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2837 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2838 # NOTE: MN Pcap file is being saved to logdir.
2839 # We scp this file as MN and TestON aren't necessarily the same vm
2840
2841 # FIXME: To be replaced with a Jenkin's post script
2842 # TODO: Load these from params
2843 # NOTE: must end in /
2844 logFolder = "/opt/onos/log/"
2845 logFiles = [ "karaf.log", "karaf.log.1" ]
2847 for f in logFiles:
2848 for node in main.nodes:
2849 dstName = main.logdir + "/" + node.name + "-" + f
2850 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2851 logFolder + f, dstName )
2852 # std*.log's
2853 # NOTE: must end in /
2854 logFolder = "/opt/onos/var/"
2855 logFiles = [ "stderr.log", "stdout.log" ]
2857 for f in logFiles:
2858 for node in main.nodes:
2859 dstName = main.logdir + "/" + node.name + "-" + f
2860 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2861 logFolder + f, dstName )
2862 else:
2863 main.log.debug( "skipping saving log files" )
2864
2865 main.step( "Stopping Mininet" )
2866 mnResult = main.Mininet1.stopNet()
2867 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2868 onpass="Mininet stopped",
2869 onfail="MN cleanup NOT successful" )
2870
2871 main.step( "Checking ONOS Logs for errors" )
2872 for node in main.nodes:
2873 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2874 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2875
2876 try:
2877 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2878 # Overwrite the file with the timer labels and values, then close
2879 labels = "Gossip Intents, Restart"
2880 data = str( gossipTime ) + ", " + str( main.restartTime )
2881 timerLog.write( labels + "\n" + data )
2882 timerLog.close()
2883 except NameError as e:
2884 main.log.exception( e )
2885
2886 def CASE14( self, main ):
2887 """
2888 start election app on all onos nodes
2889 """
2890 assert main.numCtrls, "main.numCtrls not defined"
2891 assert main, "main not defined"
2892 assert utilities.assert_equals, "utilities.assert_equals not defined"
2893 assert main.CLIs, "main.CLIs not defined"
2894 assert main.nodes, "main.nodes not defined"
2895
2896 main.case( "Start Leadership Election app" )
2897 main.step( "Install leadership election app" )
2898 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
2899 appResult = onosCli.activateApp( "org.onosproject.election" )
2900 utilities.assert_equals(
2901 expect=main.TRUE,
2902 actual=appResult,
2903 onpass="Election app installed",
2904 onfail="Something went wrong with installing Leadership election" )
2905
2906 main.step( "Run for election on each node" )
2907 for i in main.activeNodes:
2908 main.CLIs[ i ].electionTestRun()
2909 time.sleep( 5 )
2910 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
2911 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2912 utilities.assert_equals(
2913 expect=True,
2914 actual=sameResult,
2915 onpass="All nodes see the same leaderboards",
2916 onfail="Inconsistent leaderboards" )
2917
2918 if sameResult:
2919 leader = leaders[ 0 ][ 0 ]
2920 if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
2921 correctLeader = True
2922 else:
2923 correctLeader = False
2924 main.step( "First node was elected leader" )
2925 utilities.assert_equals(
2926 expect=True,
2927 actual=correctLeader,
2928 onpass="Correct leader was elected",
2929 onfail="Incorrect leader" )
2930
2931 def CASE15( self, main ):
2932 """
2933 Check that Leadership Election is still functional
2934 15.1 Run election on each node
2935 15.2 Check that each node has the same leaders and candidates
2936 15.3 Find current leader and withdraw
2937 15.4 Check that a new node was elected leader
2938 15.5 Check that the new leader was a candidate of the old leader
2939 15.6 Run for election on old leader
2940 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2941 15.8 Make sure that the old leader was added to the candidate list
2942
2943 old and new variable prefixes refer to data from before vs. after
2944 withdrawal, and later to before withdrawal vs. after re-election
2945 """
2946 import time
2947 assert main.numCtrls, "main.numCtrls not defined"
2948 assert main, "main not defined"
2949 assert utilities.assert_equals, "utilities.assert_equals not defined"
2950 assert main.CLIs, "main.CLIs not defined"
2951 assert main.nodes, "main.nodes not defined"
2952
2953 description = "Check that Leadership Election is still functional"
2954 main.case( description )
2955 # NOTE: Need to re-run after restarts since being a candidate is not persistent
2956
2957 oldLeaders = [] # list of lists of each nodes' candidates before
2958 newLeaders = [] # list of lists of each nodes' candidates after
2959 oldLeader = '' # the old leader from oldLeaders, None if not same
2960 newLeader = '' # the new leader from newLeaders, None if not same
2961 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2962 expectNoLeader = False # True when no leader should exist after withdrawal ( single-node cluster )
2963 if main.numCtrls == 1:
2964 expectNoLeader = True
2965
2966 main.step( "Run for election on each node" )
2967 electionResult = main.TRUE
2968
2969 for i in main.activeNodes: # run test election on each node
2970 if main.CLIs[ i ].electionTestRun() == main.FALSE:
2971 electionResult = main.FALSE
2972 utilities.assert_equals(
2973 expect=main.TRUE,
2974 actual=electionResult,
2975 onpass="All nodes successfully ran for leadership",
2976 onfail="At least one node failed to run for leadership" )
2977
2978 if electionResult == main.FALSE:
2979 main.log.error(
2980 "Skipping Test Case because Election Test App isn't loaded" )
2981 main.skipCase()
2982
2983 main.step( "Check that each node shows the same leader and candidates" )
2984 failMessage = "Nodes have different leaderboards"
2985 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
2986 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
2987 if sameResult:
2988 oldLeader = oldLeaders[ 0 ][ 0 ]
2989 main.log.warn( oldLeader )
2990 else:
2991 oldLeader = None
2992 utilities.assert_equals(
2993 expect=True,
2994 actual=sameResult,
2995 onpass="Leaderboards are consistent for the election topic",
2996 onfail=failMessage )
2997
2998 main.step( "Find current leader and withdraw" )
2999 withdrawResult = main.TRUE
3000 # do some sanity checking on leader before using it
3001 if oldLeader is None:
3002 main.log.error( "Leadership isn't consistent." )
3003 withdrawResult = main.FALSE
3004 # Get the CLI of the oldLeader
3005 for i in main.activeNodes:
3006 if oldLeader == main.nodes[ i ].ip_address:
3007 oldLeaderCLI = main.CLIs[ i ]
3008 break
3009 else: # FOR/ELSE statement
3010 main.log.error( "Leader election, could not find current leader" )
3011 if oldLeader:
3012 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3013 utilities.assert_equals(
3014 expect=main.TRUE,
3015 actual=withdrawResult,
3016 onpass="Node was withdrawn from election",
3017 onfail="Node was not withdrawn from election" )
3018
3019 main.step( "Check that a new node was elected leader" )
3020 failMessage = "Nodes have different leaders"
3021 # Get new leaders and candidates
3022 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
3023 newLeader = None
3024 if newLeaderResult:
3025 if newLeaders[ 0 ][ 0 ] == 'none':
3026 main.log.error( "No leader was elected on at least 1 node" )
3027 if not expectNoLeader:
3028 newLeaderResult = False
3029 newLeader = newLeaders[ 0 ][ 0 ]
3030
3031 # Check that the new leader is not the older leader, which was withdrawn
3032 if newLeader == oldLeader:
3033 newLeaderResult = False
3034 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3035 " as the current leader" )
3036 utilities.assert_equals(
3037 expect=True,
3038 actual=newLeaderResult,
3039 onpass="Leadership election passed",
3040 onfail="Something went wrong with Leadership election" )
3041
3042 main.step( "Check that that new leader was the candidate of old leader" )
3043 # candidates[ 2 ] should become the top candidate after withdrawal
3044 correctCandidateResult = main.TRUE
3045 if expectNoLeader:
3046 if newLeader == 'none':
3047 main.log.info( "No leader expected. None found. Pass" )
3048 correctCandidateResult = main.TRUE
3049 else:
3050 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3051 correctCandidateResult = main.FALSE
3052 elif len( oldLeaders[ 0 ] ) >= 3:
3053 if newLeader == oldLeaders[ 0 ][ 2 ]:
3054 # correct leader was elected
3055 correctCandidateResult = main.TRUE
3056 else:
3057 correctCandidateResult = main.FALSE
3058 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3059 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3060 else:
3061 main.log.warn( "Could not determine who should be the correct leader" )
3062 main.log.debug( oldLeaders[ 0 ] )
3063 correctCandidateResult = main.FALSE
3064 utilities.assert_equals(
3065 expect=main.TRUE,
3066 actual=correctCandidateResult,
3067 onpass="Correct Candidate Elected",
3068 onfail="Incorrect Candidate Elected" )
3069
3070 main.step( "Run for election on old leader( just so everyone " +
3071 "is in the hat )" )
3072 if oldLeaderCLI is not None:
3073 runResult = oldLeaderCLI.electionTestRun()
3074 else:
3075 main.log.error( "No old leader to re-elect" )
3076 runResult = main.FALSE
3077 utilities.assert_equals(
3078 expect=main.TRUE,
3079 actual=runResult,
3080 onpass="App re-ran for election",
3081 onfail="App failed to run for election" )
3082
3083 main.step(
3084 "Check that oldLeader is a candidate, and leader if only 1 node" )
3085 # verify leader didn't just change
3086 # Get new leaders and candidates
3087 reRunLeaders = []
3088 time.sleep( 5 ) # TODO: parameterize this sleep
3089 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
3090
3091 # Check that the re-elected node is last on the candidate List
3092 if not reRunLeaders[ 0 ]:
3093 positionResult = main.FALSE
3094 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3095 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3096 str( reRunLeaders[ 0 ] ) ) )
3097 positionResult = main.FALSE
3098 utilities.assert_equals(
3099 expect=True,
3100 actual=positionResult,
3101 onpass="Old leader successfully re-ran for election",
3102 onfail="Something went wrong with Leadership election after " +
3103 "the old leader re-ran for election" )
3104
3105 def CASE16( self, main ):
3106 """
3107 Install Distributed Primitives app
3108 """
3109 import time
3110 assert main.numCtrls, "main.numCtrls not defined"
3111 assert main, "main not defined"
3112 assert utilities.assert_equals, "utilities.assert_equals not defined"
3113 assert main.CLIs, "main.CLIs not defined"
3114 assert main.nodes, "main.nodes not defined"
3115
3116 # Variables for the distributed primitives tests
3117 main.pCounterName = "TestON-Partitions"
3118 main.pCounterValue = 0
3119 main.onosSet = set( [] )
3120 main.onosSetName = "TestON-set"
3121
3122 description = "Install Primitives app"
3123 main.case( description )
3124 main.step( "Install Primitives app" )
3125 appName = "org.onosproject.distributedprimitives"
3126 node = main.activeNodes[ 0 ]
3127 appResults = main.CLIs[ node ].activateApp( appName )
3128 utilities.assert_equals( expect=main.TRUE,
3129 actual=appResults,
3130 onpass="Primitives app activated",
3131 onfail="Primitives app not activated" )
3132 time.sleep( 5 ) # To allow all nodes to activate
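# NOTE: A hedged alternative to the fixed sleep above; this sketch
#       assumes an appStatus() wrapper exists on the CLI driver, which
#       may not be the case:
#
#     utilities.retry( lambda: main.CLIs[ node ].appStatus( appName ),
#                      "INSTALLED", sleep=1, attempts=10 )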
3133
3134 def CASE17( self, main ):
3135 """
3136 Check for basic functionality with distributed primitives
3137 """
3138 main.HA.CASE17( main )