blob: 14d73b9d9ceff6578415da6fba89c98df7784246 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    a full network partition

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
class HAfullNetPartition:
    """
    TestON test class: HA test that partitions an ONOS cluster into two
    sub-clusters and checks recovery.  Each CASE* method is a test case
    invoked by the TestON framework with the shared `main` test context.
    """

    def __init__( self ):
        # TestON requires a default attribute on test classes; no other
        # per-instance state is kept here.
        self.default = ''
30
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump

        main: the TestON test context, providing component handles
        (main.ONOSbench, main.Mininet1, ...), params, and logging.
        """
        # NOTE(review): `imp` appears unused in this case - kept as-is.
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cap the requested cluster size at what the bench can support
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        # Shared HA helper functions used by several cases (e.g. nodesCheck)
        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect per-node CLI and host handles; stop at the first index
        # TestON did not create a component for.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAfullNetPartition"
        plotName = "Plot-HA"
        index = "2"
        # Confluence wiki markup embedding the Jenkins plot iframe
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file: restore the stock script that the
        # custom copy above overwrote
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for node in main.nodes:
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Checking if ONOS is up yet" )
        # Retry the whole-cluster isup check up to twice
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        # Start every node's CLI in parallel, then join and AND the results
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[ main.nodes[ i ].ip_address ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump non-ACTIVE OSGi components per node for debugging, then abort
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # Apply every <component><setting>value</setting></component> entry
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        # Verify app name<->id mappings are consistent on every active node
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
367
    def CASE2( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch (s1-s28) at all ONOS controller IPs via
        ovs-vsctl, then verifies each switch's controller list contains every
        node.  Requires CASE1 to have populated main.nodes/main.CLIs and the
        ONOS*Port globals.
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            # Every controller IP must appear in the switch's target list
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
420
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually assigns each of the 28 obelisk switches to a designated
        ONOS node with 'device-role', then re-reads the mastership after a
        short settle period and verifies the assignment took effect.
        Requires CASE1/CASE2 to have run first.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (the `% main.numCtrls` wraps indices for smaller ones)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
542
543 def CASE3( self, main ):
544 """
545 Assign intents
546 """
547 import time
548 import json
549 assert main.numCtrls, "main.numCtrls not defined"
550 assert main, "main not defined"
551 assert utilities.assert_equals, "utilities.assert_equals not defined"
552 assert main.CLIs, "main.CLIs not defined"
553 assert main.nodes, "main.nodes not defined"
554 main.case( "Adding host Intents" )
555 main.caseExplanation = "Discover hosts by using pingall then " +\
556 "assign predetermined host-to-host intents." +\
557 " After installation, check that the intent" +\
558 " is distributed to all nodes and the state" +\
559 " is INSTALLED"
560
561 # install onos-app-fwd
562 main.step( "Install reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700563 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -0800564 installResults = onosCli.activateApp( "org.onosproject.fwd" )
565 utilities.assert_equals( expect=main.TRUE, actual=installResults,
566 onpass="Install fwd successful",
567 onfail="Install fwd failed" )
568
569 main.step( "Check app ids" )
570 appCheck = main.TRUE
571 threads = []
572 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700573 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall6e709752016-02-01 13:38:46 -0800574 name="appToIDCheck-" + str( i ),
575 args=[] )
576 threads.append( t )
577 t.start()
578
579 for t in threads:
580 t.join()
581 appCheck = appCheck and t.result
582 if appCheck != main.TRUE:
583 main.log.warn( onosCli.apps() )
584 main.log.warn( onosCli.appIDs() )
585 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
586 onpass="App Ids seem to be correct",
587 onfail="Something is wrong with app Ids" )
588
589 main.step( "Discovering Hosts( Via pingall for now )" )
590 # FIXME: Once we have a host discovery mechanism, use that instead
591 # REACTIVE FWD test
592 pingResult = main.FALSE
593 passMsg = "Reactive Pingall test passed"
594 time1 = time.time()
595 pingResult = main.Mininet1.pingall()
596 time2 = time.time()
597 if not pingResult:
Jon Hallf37d44d2017-05-24 10:37:30 -0700598 main.log.warn( "First pingall failed. Trying again..." )
Jon Hall6e709752016-02-01 13:38:46 -0800599 pingResult = main.Mininet1.pingall()
600 passMsg += " on the second try"
601 utilities.assert_equals(
602 expect=main.TRUE,
603 actual=pingResult,
Jon Hallf37d44d2017-05-24 10:37:30 -0700604 onpass=passMsg,
Jon Hall6e709752016-02-01 13:38:46 -0800605 onfail="Reactive Pingall failed, " +
606 "one or more ping pairs failed" )
607 main.log.info( "Time for pingall: %2f seconds" %
608 ( time2 - time1 ) )
609 # timeout for fwd flows
610 time.sleep( 11 )
611 # uninstall onos-app-fwd
612 main.step( "Uninstall reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700613 node = main.activeNodes[ 0 ]
614 uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
Jon Hall6e709752016-02-01 13:38:46 -0800615 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
616 onpass="Uninstall fwd successful",
617 onfail="Uninstall fwd failed" )
618
619 main.step( "Check app ids" )
620 threads = []
621 appCheck2 = main.TRUE
622 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700623 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall6e709752016-02-01 13:38:46 -0800624 name="appToIDCheck-" + str( i ),
625 args=[] )
626 threads.append( t )
627 t.start()
628
629 for t in threads:
630 t.join()
631 appCheck2 = appCheck2 and t.result
632 if appCheck2 != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700633 node = main.activeNodes[ 0 ]
634 main.log.warn( main.CLIs[ node ].apps() )
635 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall6e709752016-02-01 13:38:46 -0800636 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
637 onpass="App Ids seem to be correct",
638 onfail="Something is wrong with app Ids" )
639
640 main.step( "Add host intents via cli" )
641 intentIds = []
642 # TODO: move the host numbers to params
643 # Maybe look at all the paths we ping?
644 intentAddResult = True
645 hostResult = main.TRUE
646 for i in range( 8, 18 ):
647 main.log.info( "Adding host intent between h" + str( i ) +
648 " and h" + str( i + 10 ) )
649 host1 = "00:00:00:00:00:" + \
650 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
651 host2 = "00:00:00:00:00:" + \
652 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
653 # NOTE: getHost can return None
654 host1Dict = onosCli.getHost( host1 )
655 host2Dict = onosCli.getHost( host2 )
656 host1Id = None
657 host2Id = None
658 if host1Dict and host2Dict:
659 host1Id = host1Dict.get( 'id', None )
660 host2Id = host2Dict.get( 'id', None )
661 if host1Id and host2Id:
662 nodeNum = ( i % len( main.activeNodes ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700663 node = main.activeNodes[ nodeNum ]
664 tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
Jon Hall6e709752016-02-01 13:38:46 -0800665 if tmpId:
666 main.log.info( "Added intent with id: " + tmpId )
667 intentIds.append( tmpId )
668 else:
669 main.log.error( "addHostIntent returned: " +
670 repr( tmpId ) )
671 else:
672 main.log.error( "Error, getHost() failed for h" + str( i ) +
673 " and/or h" + str( i + 10 ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700674 node = main.activeNodes[ 0 ]
675 hosts = main.CLIs[ node ].hosts()
Jon Hall6e709752016-02-01 13:38:46 -0800676 main.log.warn( "Hosts output: " )
677 try:
678 main.log.warn( json.dumps( json.loads( hosts ),
679 sort_keys=True,
680 indent=4,
681 separators=( ',', ': ' ) ) )
682 except ( ValueError, TypeError ):
683 main.log.warn( repr( hosts ) )
684 hostResult = main.FALSE
685 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
686 onpass="Found a host id for each host",
687 onfail="Error looking up host ids" )
688
689 intentStart = time.time()
690 onosIds = onosCli.getAllIntentsId()
691 main.log.info( "Submitted intents: " + str( intentIds ) )
692 main.log.info( "Intents in ONOS: " + str( onosIds ) )
693 for intent in intentIds:
694 if intent in onosIds:
695 pass # intent submitted is in onos
696 else:
697 intentAddResult = False
698 if intentAddResult:
699 intentStop = time.time()
700 else:
701 intentStop = None
702 # Print the intent states
703 intents = onosCli.intents()
704 intentStates = []
705 installedCheck = True
706 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
707 count = 0
708 try:
709 for intent in json.loads( intents ):
710 state = intent.get( 'state', None )
711 if "INSTALLED" not in state:
712 installedCheck = False
713 intentId = intent.get( 'id', None )
714 intentStates.append( ( intentId, state ) )
715 except ( ValueError, TypeError ):
716 main.log.exception( "Error parsing intents" )
717 # add submitted intents not in the store
718 tmplist = [ i for i, s in intentStates ]
719 missingIntents = False
720 for i in intentIds:
721 if i not in tmplist:
722 intentStates.append( ( i, " - " ) )
723 missingIntents = True
724 intentStates.sort()
725 for i, s in intentStates:
726 count += 1
727 main.log.info( "%-6s%-15s%-15s" %
728 ( str( count ), str( i ), str( s ) ) )
729 leaders = onosCli.leaders()
730 try:
731 missing = False
732 if leaders:
733 parsedLeaders = json.loads( leaders )
734 main.log.warn( json.dumps( parsedLeaders,
735 sort_keys=True,
736 indent=4,
737 separators=( ',', ': ' ) ) )
738 # check for all intent partitions
739 topics = []
740 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700741 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800742 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700743 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -0800744 for topic in topics:
745 if topic not in ONOStopics:
746 main.log.error( "Error: " + topic +
747 " not in leaders" )
748 missing = True
749 else:
750 main.log.error( "leaders() returned None" )
751 except ( ValueError, TypeError ):
752 main.log.exception( "Error parsing leaders" )
753 main.log.error( repr( leaders ) )
754 # Check all nodes
755 if missing:
756 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700757 response = main.CLIs[ i ].leaders( jsonFormat=False )
758 main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
Jon Hall6e709752016-02-01 13:38:46 -0800759 str( response ) )
760
761 partitions = onosCli.partitions()
762 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700763 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -0800764 parsedPartitions = json.loads( partitions )
765 main.log.warn( json.dumps( parsedPartitions,
766 sort_keys=True,
767 indent=4,
768 separators=( ',', ': ' ) ) )
769 # TODO check for a leader in all paritions
770 # TODO check for consistency among nodes
771 else:
772 main.log.error( "partitions() returned None" )
773 except ( ValueError, TypeError ):
774 main.log.exception( "Error parsing partitions" )
775 main.log.error( repr( partitions ) )
776 pendingMap = onosCli.pendingMap()
777 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700778 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -0800779 parsedPending = json.loads( pendingMap )
780 main.log.warn( json.dumps( parsedPending,
781 sort_keys=True,
782 indent=4,
783 separators=( ',', ': ' ) ) )
784 # TODO check something here?
785 else:
786 main.log.error( "pendingMap() returned None" )
787 except ( ValueError, TypeError ):
788 main.log.exception( "Error parsing pending map" )
789 main.log.error( repr( pendingMap ) )
790
791 intentAddResult = bool( intentAddResult and not missingIntents and
792 installedCheck )
793 if not intentAddResult:
794 main.log.error( "Error in pushing host intents to ONOS" )
795
796 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700797 for j in range( 100 ):
Jon Hall6e709752016-02-01 13:38:46 -0800798 correct = True
799 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
800 for i in main.activeNodes:
801 onosIds = []
Jon Hallf37d44d2017-05-24 10:37:30 -0700802 ids = main.CLIs[ i ].getAllIntentsId()
Jon Hall6e709752016-02-01 13:38:46 -0800803 onosIds.append( ids )
Jon Hallf37d44d2017-05-24 10:37:30 -0700804 main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
Jon Hall6e709752016-02-01 13:38:46 -0800805 str( sorted( onosIds ) ) )
806 if sorted( ids ) != sorted( intentIds ):
807 main.log.warn( "Set of intent IDs doesn't match" )
808 correct = False
809 break
810 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700811 intents = json.loads( main.CLIs[ i ].intents() )
Jon Hall6e709752016-02-01 13:38:46 -0800812 for intent in intents:
813 if intent[ 'state' ] != "INSTALLED":
814 main.log.warn( "Intent " + intent[ 'id' ] +
815 " is " + intent[ 'state' ] )
816 correct = False
817 break
818 if correct:
819 break
820 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700821 time.sleep( 1 )
Jon Hall6e709752016-02-01 13:38:46 -0800822 if not intentStop:
823 intentStop = time.time()
824 global gossipTime
825 gossipTime = intentStop - intentStart
826 main.log.info( "It took about " + str( gossipTime ) +
827 " seconds for all intents to appear in each node" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700828 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Jon Hall6e709752016-02-01 13:38:46 -0800829 maxGossipTime = gossipPeriod * len( main.activeNodes )
830 utilities.assert_greater_equals(
831 expect=maxGossipTime, actual=gossipTime,
832 onpass="ECM anti-entropy for intents worked within " +
833 "expected time",
834 onfail="Intent ECM anti-entropy took too long. " +
835 "Expected time:{}, Actual time:{}".format( maxGossipTime,
836 gossipTime ) )
837 if gossipTime <= maxGossipTime:
838 intentAddResult = True
839
840 if not intentAddResult or "key" in pendingMap:
841 import time
842 installedCheck = True
843 main.log.info( "Sleeping 60 seconds to see if intents are found" )
844 time.sleep( 60 )
845 onosIds = onosCli.getAllIntentsId()
846 main.log.info( "Submitted intents: " + str( intentIds ) )
847 main.log.info( "Intents in ONOS: " + str( onosIds ) )
848 # Print the intent states
849 intents = onosCli.intents()
850 intentStates = []
851 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
852 count = 0
853 try:
854 for intent in json.loads( intents ):
855 # Iter through intents of a node
856 state = intent.get( 'state', None )
857 if "INSTALLED" not in state:
858 installedCheck = False
859 intentId = intent.get( 'id', None )
860 intentStates.append( ( intentId, state ) )
861 except ( ValueError, TypeError ):
862 main.log.exception( "Error parsing intents" )
863 # add submitted intents not in the store
864 tmplist = [ i for i, s in intentStates ]
865 for i in intentIds:
866 if i not in tmplist:
867 intentStates.append( ( i, " - " ) )
868 intentStates.sort()
869 for i, s in intentStates:
870 count += 1
871 main.log.info( "%-6s%-15s%-15s" %
872 ( str( count ), str( i ), str( s ) ) )
873 leaders = onosCli.leaders()
874 try:
875 missing = False
876 if leaders:
877 parsedLeaders = json.loads( leaders )
878 main.log.warn( json.dumps( parsedLeaders,
879 sort_keys=True,
880 indent=4,
881 separators=( ',', ': ' ) ) )
882 # check for all intent partitions
883 # check for election
884 topics = []
885 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700886 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800887 # FIXME: this should only be after we start the app
888 topics.append( "org.onosproject.election" )
889 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700890 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -0800891 for topic in topics:
892 if topic not in ONOStopics:
893 main.log.error( "Error: " + topic +
894 " not in leaders" )
895 missing = True
896 else:
897 main.log.error( "leaders() returned None" )
898 except ( ValueError, TypeError ):
899 main.log.exception( "Error parsing leaders" )
900 main.log.error( repr( leaders ) )
901 # Check all nodes
902 if missing:
903 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700904 node = main.CLIs[ i ]
905 response = node.leaders( jsonFormat=False )
Jon Hall6e709752016-02-01 13:38:46 -0800906 main.log.warn( str( node.name ) + " leaders output: \n" +
907 str( response ) )
908
909 partitions = onosCli.partitions()
910 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700911 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -0800912 parsedPartitions = json.loads( partitions )
913 main.log.warn( json.dumps( parsedPartitions,
914 sort_keys=True,
915 indent=4,
916 separators=( ',', ': ' ) ) )
917 # TODO check for a leader in all paritions
918 # TODO check for consistency among nodes
919 else:
920 main.log.error( "partitions() returned None" )
921 except ( ValueError, TypeError ):
922 main.log.exception( "Error parsing partitions" )
923 main.log.error( repr( partitions ) )
924 pendingMap = onosCli.pendingMap()
925 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700926 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -0800927 parsedPending = json.loads( pendingMap )
928 main.log.warn( json.dumps( parsedPending,
929 sort_keys=True,
930 indent=4,
931 separators=( ',', ': ' ) ) )
932 # TODO check something here?
933 else:
934 main.log.error( "pendingMap() returned None" )
935 except ( ValueError, TypeError ):
936 main.log.exception( "Error parsing pending map" )
937 main.log.error( repr( pendingMap ) )
938
939 def CASE4( self, main ):
940 """
941 Ping across added host intents
942 """
943 import json
944 import time
945 assert main.numCtrls, "main.numCtrls not defined"
946 assert main, "main not defined"
947 assert utilities.assert_equals, "utilities.assert_equals not defined"
948 assert main.CLIs, "main.CLIs not defined"
949 assert main.nodes, "main.nodes not defined"
950 main.case( "Verify connectivity by sending traffic across Intents" )
951 main.caseExplanation = "Ping across added host intents to check " +\
952 "functionality and check the state of " +\
953 "the intent"
Jon Hall6e709752016-02-01 13:38:46 -0800954
Jon Hallf37d44d2017-05-24 10:37:30 -0700955 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -0800956 main.step( "Check Intent state" )
957 installedCheck = False
958 loopCount = 0
959 while not installedCheck and loopCount < 40:
960 installedCheck = True
961 # Print the intent states
962 intents = onosCli.intents()
963 intentStates = []
964 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
965 count = 0
966 # Iter through intents of a node
967 try:
968 for intent in json.loads( intents ):
969 state = intent.get( 'state', None )
970 if "INSTALLED" not in state:
971 installedCheck = False
972 intentId = intent.get( 'id', None )
973 intentStates.append( ( intentId, state ) )
974 except ( ValueError, TypeError ):
975 main.log.exception( "Error parsing intents." )
976 # Print states
977 intentStates.sort()
978 for i, s in intentStates:
979 count += 1
980 main.log.info( "%-6s%-15s%-15s" %
981 ( str( count ), str( i ), str( s ) ) )
982 if not installedCheck:
983 time.sleep( 1 )
984 loopCount += 1
985 utilities.assert_equals( expect=True, actual=installedCheck,
986 onpass="Intents are all INSTALLED",
987 onfail="Intents are not all in " +
988 "INSTALLED state" )
989
Jon Hall9d2dcad2016-04-08 10:15:20 -0700990 main.step( "Ping across added host intents" )
Jon Hall9d2dcad2016-04-08 10:15:20 -0700991 PingResult = main.TRUE
992 for i in range( 8, 18 ):
993 ping = main.Mininet1.pingHost( src="h" + str( i ),
994 target="h" + str( i + 10 ) )
995 PingResult = PingResult and ping
996 if ping == main.FALSE:
997 main.log.warn( "Ping failed between h" + str( i ) +
998 " and h" + str( i + 10 ) )
999 elif ping == main.TRUE:
1000 main.log.info( "Ping test passed!" )
1001 # Don't set PingResult or you'd override failures
1002 if PingResult == main.FALSE:
1003 main.log.error(
1004 "Intents have not been installed correctly, pings failed." )
1005 # TODO: pretty print
1006 main.log.warn( "ONOS1 intents: " )
1007 try:
1008 tmpIntents = onosCli.intents()
1009 main.log.warn( json.dumps( json.loads( tmpIntents ),
1010 sort_keys=True,
1011 indent=4,
1012 separators=( ',', ': ' ) ) )
1013 except ( ValueError, TypeError ):
1014 main.log.warn( repr( tmpIntents ) )
1015 utilities.assert_equals(
1016 expect=main.TRUE,
1017 actual=PingResult,
1018 onpass="Intents have been installed correctly and pings work",
1019 onfail="Intents have not been installed correctly, pings failed." )
1020
Jon Hall6e709752016-02-01 13:38:46 -08001021 main.step( "Check leadership of topics" )
1022 leaders = onosCli.leaders()
1023 topicCheck = main.TRUE
1024 try:
1025 if leaders:
1026 parsedLeaders = json.loads( leaders )
1027 main.log.warn( json.dumps( parsedLeaders,
1028 sort_keys=True,
1029 indent=4,
1030 separators=( ',', ': ' ) ) )
1031 # check for all intent partitions
1032 # check for election
1033 # TODO: Look at Devices as topics now that it uses this system
1034 topics = []
1035 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001036 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -08001037 # FIXME: this should only be after we start the app
1038 # FIXME: topics.append( "org.onosproject.election" )
1039 # Print leaders output
1040 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001041 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -08001042 for topic in topics:
1043 if topic not in ONOStopics:
1044 main.log.error( "Error: " + topic +
1045 " not in leaders" )
1046 topicCheck = main.FALSE
1047 else:
1048 main.log.error( "leaders() returned None" )
1049 topicCheck = main.FALSE
1050 except ( ValueError, TypeError ):
1051 topicCheck = main.FALSE
1052 main.log.exception( "Error parsing leaders" )
1053 main.log.error( repr( leaders ) )
1054 # TODO: Check for a leader of these topics
1055 # Check all nodes
1056 if topicCheck:
1057 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001058 node = main.CLIs[ i ]
1059 response = node.leaders( jsonFormat=False )
Jon Hall6e709752016-02-01 13:38:46 -08001060 main.log.warn( str( node.name ) + " leaders output: \n" +
1061 str( response ) )
1062
1063 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1064 onpass="intent Partitions is in leaders",
1065 onfail="Some topics were lost " )
1066 # Print partitions
1067 partitions = onosCli.partitions()
1068 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001069 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -08001070 parsedPartitions = json.loads( partitions )
1071 main.log.warn( json.dumps( parsedPartitions,
1072 sort_keys=True,
1073 indent=4,
1074 separators=( ',', ': ' ) ) )
1075 # TODO check for a leader in all paritions
1076 # TODO check for consistency among nodes
1077 else:
1078 main.log.error( "partitions() returned None" )
1079 except ( ValueError, TypeError ):
1080 main.log.exception( "Error parsing partitions" )
1081 main.log.error( repr( partitions ) )
1082 # Print Pending Map
1083 pendingMap = onosCli.pendingMap()
1084 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001085 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -08001086 parsedPending = json.loads( pendingMap )
1087 main.log.warn( json.dumps( parsedPending,
1088 sort_keys=True,
1089 indent=4,
1090 separators=( ',', ': ' ) ) )
1091 # TODO check something here?
1092 else:
1093 main.log.error( "pendingMap() returned None" )
1094 except ( ValueError, TypeError ):
1095 main.log.exception( "Error parsing pending map" )
1096 main.log.error( repr( pendingMap ) )
1097
1098 if not installedCheck:
1099 main.log.info( "Waiting 60 seconds to see if the state of " +
1100 "intents change" )
1101 time.sleep( 60 )
1102 # Print the intent states
1103 intents = onosCli.intents()
1104 intentStates = []
1105 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1106 count = 0
1107 # Iter through intents of a node
1108 try:
1109 for intent in json.loads( intents ):
1110 state = intent.get( 'state', None )
1111 if "INSTALLED" not in state:
1112 installedCheck = False
1113 intentId = intent.get( 'id', None )
1114 intentStates.append( ( intentId, state ) )
1115 except ( ValueError, TypeError ):
1116 main.log.exception( "Error parsing intents." )
1117 intentStates.sort()
1118 for i, s in intentStates:
1119 count += 1
1120 main.log.info( "%-6s%-15s%-15s" %
1121 ( str( count ), str( i ), str( s ) ) )
1122 leaders = onosCli.leaders()
1123 try:
1124 missing = False
1125 if leaders:
1126 parsedLeaders = json.loads( leaders )
1127 main.log.warn( json.dumps( parsedLeaders,
1128 sort_keys=True,
1129 indent=4,
1130 separators=( ',', ': ' ) ) )
1131 # check for all intent partitions
1132 # check for election
1133 topics = []
1134 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001135 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -08001136 # FIXME: this should only be after we start the app
1137 topics.append( "org.onosproject.election" )
1138 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001139 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -08001140 for topic in topics:
1141 if topic not in ONOStopics:
1142 main.log.error( "Error: " + topic +
1143 " not in leaders" )
1144 missing = True
1145 else:
1146 main.log.error( "leaders() returned None" )
1147 except ( ValueError, TypeError ):
1148 main.log.exception( "Error parsing leaders" )
1149 main.log.error( repr( leaders ) )
1150 if missing:
1151 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001152 node = main.CLIs[ i ]
1153 response = node.leaders( jsonFormat=False )
Jon Hall6e709752016-02-01 13:38:46 -08001154 main.log.warn( str( node.name ) + " leaders output: \n" +
1155 str( response ) )
1156
1157 partitions = onosCli.partitions()
1158 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001159 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -08001160 parsedPartitions = json.loads( partitions )
1161 main.log.warn( json.dumps( parsedPartitions,
1162 sort_keys=True,
1163 indent=4,
1164 separators=( ',', ': ' ) ) )
1165 # TODO check for a leader in all paritions
1166 # TODO check for consistency among nodes
1167 else:
1168 main.log.error( "partitions() returned None" )
1169 except ( ValueError, TypeError ):
1170 main.log.exception( "Error parsing partitions" )
1171 main.log.error( repr( partitions ) )
1172 pendingMap = onosCli.pendingMap()
1173 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001174 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -08001175 parsedPending = json.loads( pendingMap )
1176 main.log.warn( json.dumps( parsedPending,
1177 sort_keys=True,
1178 indent=4,
1179 separators=( ',', ': ' ) ) )
1180 # TODO check something here?
1181 else:
1182 main.log.error( "pendingMap() returned None" )
1183 except ( ValueError, TypeError ):
1184 main.log.exception( "Error parsing pending map" )
1185 main.log.error( repr( pendingMap ) )
1186 # Print flowrules
Jon Hallf37d44d2017-05-24 10:37:30 -07001187 node = main.activeNodes[ 0 ]
1188 main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
Jon Hall6e709752016-02-01 13:38:46 -08001189 main.step( "Wait a minute then ping again" )
1190 # the wait is above
1191 PingResult = main.TRUE
1192 for i in range( 8, 18 ):
1193 ping = main.Mininet1.pingHost( src="h" + str( i ),
1194 target="h" + str( i + 10 ) )
1195 PingResult = PingResult and ping
1196 if ping == main.FALSE:
1197 main.log.warn( "Ping failed between h" + str( i ) +
1198 " and h" + str( i + 10 ) )
1199 elif ping == main.TRUE:
1200 main.log.info( "Ping test passed!" )
1201 # Don't set PingResult or you'd override failures
1202 if PingResult == main.FALSE:
1203 main.log.error(
1204 "Intents have not been installed correctly, pings failed." )
1205 # TODO: pretty print
1206 main.log.warn( "ONOS1 intents: " )
1207 try:
1208 tmpIntents = onosCli.intents()
1209 main.log.warn( json.dumps( json.loads( tmpIntents ),
1210 sort_keys=True,
1211 indent=4,
1212 separators=( ',', ': ' ) ) )
1213 except ( ValueError, TypeError ):
1214 main.log.warn( repr( tmpIntents ) )
1215 utilities.assert_equals(
1216 expect=main.TRUE,
1217 actual=PingResult,
1218 onpass="Intents have been installed correctly and pings work",
1219 onfail="Intents have not been installed correctly, pings failed." )
1220
1221 def CASE5( self, main ):
1222 """
1223 Reading state of ONOS
1224 """
1225 import json
1226 import time
1227 assert main.numCtrls, "main.numCtrls not defined"
1228 assert main, "main not defined"
1229 assert utilities.assert_equals, "utilities.assert_equals not defined"
1230 assert main.CLIs, "main.CLIs not defined"
1231 assert main.nodes, "main.nodes not defined"
1232
1233 main.case( "Setting up and gathering data for current state" )
1234 # The general idea for this test case is to pull the state of
1235 # ( intents,flows, topology,... ) from each ONOS node
1236 # We can then compare them with each other and also with past states
1237
1238 main.step( "Check that each switch has a master" )
1239 global mastershipState
1240 mastershipState = '[]'
1241
1242 # Assert that each device has a master
1243 rolesNotNull = main.TRUE
1244 threads = []
1245 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001246 t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
Jon Hall6e709752016-02-01 13:38:46 -08001247 name="rolesNotNull-" + str( i ),
1248 args=[] )
1249 threads.append( t )
1250 t.start()
1251
1252 for t in threads:
1253 t.join()
1254 rolesNotNull = rolesNotNull and t.result
1255 utilities.assert_equals(
1256 expect=main.TRUE,
1257 actual=rolesNotNull,
1258 onpass="Each device has a master",
1259 onfail="Some devices don't have a master assigned" )
1260
1261 main.step( "Get the Mastership of each switch from each controller" )
1262 ONOSMastership = []
1263 mastershipCheck = main.FALSE
1264 consistentMastership = True
1265 rolesResults = True
1266 threads = []
1267 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001268 t = main.Thread( target=main.CLIs[ i ].roles,
Jon Hall6e709752016-02-01 13:38:46 -08001269 name="roles-" + str( i ),
1270 args=[] )
1271 threads.append( t )
1272 t.start()
1273
1274 for t in threads:
1275 t.join()
1276 ONOSMastership.append( t.result )
1277
1278 for i in range( len( ONOSMastership ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001279 node = str( main.activeNodes[ i ] + 1 )
1280 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hall6e709752016-02-01 13:38:46 -08001281 main.log.error( "Error in getting ONOS" + node + " roles" )
1282 main.log.warn( "ONOS" + node + " mastership response: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001283 repr( ONOSMastership[ i ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001284 rolesResults = False
1285 utilities.assert_equals(
1286 expect=True,
1287 actual=rolesResults,
1288 onpass="No error in reading roles output",
1289 onfail="Error in reading roles from ONOS" )
1290
1291 main.step( "Check for consistency in roles from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001292 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
Jon Hall6e709752016-02-01 13:38:46 -08001293 main.log.info(
1294 "Switch roles are consistent across all ONOS nodes" )
1295 else:
1296 consistentMastership = False
1297 utilities.assert_equals(
1298 expect=True,
1299 actual=consistentMastership,
1300 onpass="Switch roles are consistent across all ONOS nodes",
1301 onfail="ONOS nodes have different views of switch roles" )
1302
1303 if rolesResults and not consistentMastership:
1304 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001305 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001306 try:
1307 main.log.warn(
1308 "ONOS" + node + " roles: ",
1309 json.dumps(
1310 json.loads( ONOSMastership[ i ] ),
1311 sort_keys=True,
1312 indent=4,
1313 separators=( ',', ': ' ) ) )
1314 except ( ValueError, TypeError ):
1315 main.log.warn( repr( ONOSMastership[ i ] ) )
1316 elif rolesResults and consistentMastership:
1317 mastershipCheck = main.TRUE
1318 mastershipState = ONOSMastership[ 0 ]
1319
1320 main.step( "Get the intents from each controller" )
1321 global intentState
1322 intentState = []
1323 ONOSIntents = []
1324 intentCheck = main.FALSE
1325 consistentIntents = True
1326 intentsResults = True
1327 threads = []
1328 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001329 t = main.Thread( target=main.CLIs[ i ].intents,
Jon Hall6e709752016-02-01 13:38:46 -08001330 name="intents-" + str( i ),
1331 args=[],
1332 kwargs={ 'jsonFormat': True } )
1333 threads.append( t )
1334 t.start()
1335
1336 for t in threads:
1337 t.join()
1338 ONOSIntents.append( t.result )
1339
1340 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001341 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001342 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1343 main.log.error( "Error in getting ONOS" + node + " intents" )
1344 main.log.warn( "ONOS" + node + " intents response: " +
1345 repr( ONOSIntents[ i ] ) )
1346 intentsResults = False
1347 utilities.assert_equals(
1348 expect=True,
1349 actual=intentsResults,
1350 onpass="No error in reading intents output",
1351 onfail="Error in reading intents from ONOS" )
1352
1353 main.step( "Check for consistency in Intents from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001354 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
Jon Hall6e709752016-02-01 13:38:46 -08001355 main.log.info( "Intents are consistent across all ONOS " +
1356 "nodes" )
1357 else:
1358 consistentIntents = False
1359 main.log.error( "Intents not consistent" )
1360 utilities.assert_equals(
1361 expect=True,
1362 actual=consistentIntents,
1363 onpass="Intents are consistent across all ONOS nodes",
1364 onfail="ONOS nodes have different views of intents" )
1365
1366 if intentsResults:
1367 # Try to make it easy to figure out what is happening
1368 #
1369 # Intent ONOS1 ONOS2 ...
1370 # 0x01 INSTALLED INSTALLING
1371 # ... ... ...
1372 # ... ... ...
1373 title = " Id"
1374 for n in main.activeNodes:
1375 title += " " * 10 + "ONOS" + str( n + 1 )
1376 main.log.warn( title )
1377 # get all intent keys in the cluster
1378 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001379 try:
1380 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001381 for nodeStr in ONOSIntents:
1382 node = json.loads( nodeStr )
1383 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001384 keys.append( intent.get( 'id' ) )
1385 keys = set( keys )
1386 # For each intent key, print the state on each node
1387 for key in keys:
1388 row = "%-13s" % key
1389 for nodeStr in ONOSIntents:
1390 node = json.loads( nodeStr )
1391 for intent in node:
1392 if intent.get( 'id', "Error" ) == key:
1393 row += "%-15s" % intent.get( 'state' )
1394 main.log.warn( row )
1395 # End of intent state table
1396 except ValueError as e:
1397 main.log.exception( e )
1398 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001399
1400 if intentsResults and not consistentIntents:
1401 # print the json objects
Jon Hallf37d44d2017-05-24 10:37:30 -07001402 n = str( main.activeNodes[ -1 ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001403 main.log.debug( "ONOS" + n + " intents: " )
1404 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1405 sort_keys=True,
1406 indent=4,
1407 separators=( ',', ': ' ) ) )
1408 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001409 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001410 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1411 main.log.debug( "ONOS" + node + " intents: " )
Jon Hallf37d44d2017-05-24 10:37:30 -07001412 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
Jon Hall6e709752016-02-01 13:38:46 -08001413 sort_keys=True,
1414 indent=4,
1415 separators=( ',', ': ' ) ) )
1416 else:
1417 main.log.debug( "ONOS" + node + " intents match ONOS" +
1418 n + " intents" )
1419 elif intentsResults and consistentIntents:
1420 intentCheck = main.TRUE
1421 intentState = ONOSIntents[ 0 ]
1422
1423 main.step( "Get the flows from each controller" )
1424 global flowState
1425 flowState = []
1426 ONOSFlows = []
1427 ONOSFlowsJson = []
1428 flowCheck = main.FALSE
1429 consistentFlows = True
1430 flowsResults = True
1431 threads = []
1432 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001433 t = main.Thread( target=main.CLIs[ i ].flows,
Jon Hall6e709752016-02-01 13:38:46 -08001434 name="flows-" + str( i ),
1435 args=[],
1436 kwargs={ 'jsonFormat': True } )
1437 threads.append( t )
1438 t.start()
1439
1440 # NOTE: Flows command can take some time to run
Jon Hallf37d44d2017-05-24 10:37:30 -07001441 time.sleep( 30 )
Jon Hall6e709752016-02-01 13:38:46 -08001442 for t in threads:
1443 t.join()
1444 result = t.result
1445 ONOSFlows.append( result )
1446
1447 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001448 num = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001449 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1450 main.log.error( "Error in getting ONOS" + num + " flows" )
1451 main.log.warn( "ONOS" + num + " flows response: " +
1452 repr( ONOSFlows[ i ] ) )
1453 flowsResults = False
1454 ONOSFlowsJson.append( None )
1455 else:
1456 try:
1457 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1458 except ( ValueError, TypeError ):
1459 # FIXME: change this to log.error?
1460 main.log.exception( "Error in parsing ONOS" + num +
1461 " response as json." )
1462 main.log.error( repr( ONOSFlows[ i ] ) )
1463 ONOSFlowsJson.append( None )
1464 flowsResults = False
1465 utilities.assert_equals(
1466 expect=True,
1467 actual=flowsResults,
1468 onpass="No error in reading flows output",
1469 onfail="Error in reading flows from ONOS" )
1470
1471 main.step( "Check for consistency in Flows from each controller" )
1472 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1473 if all( tmp ):
1474 main.log.info( "Flow count is consistent across all ONOS nodes" )
1475 else:
1476 consistentFlows = False
1477 utilities.assert_equals(
1478 expect=True,
1479 actual=consistentFlows,
1480 onpass="The flow count is consistent across all ONOS nodes",
1481 onfail="ONOS nodes have different flow counts" )
1482
1483 if flowsResults and not consistentFlows:
1484 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001485 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001486 try:
1487 main.log.warn(
1488 "ONOS" + node + " flows: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001489 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
Jon Hall6e709752016-02-01 13:38:46 -08001490 indent=4, separators=( ',', ': ' ) ) )
1491 except ( ValueError, TypeError ):
1492 main.log.warn( "ONOS" + node + " flows: " +
1493 repr( ONOSFlows[ i ] ) )
1494 elif flowsResults and consistentFlows:
1495 flowCheck = main.TRUE
1496 flowState = ONOSFlows[ 0 ]
1497
1498 main.step( "Get the OF Table entries" )
1499 global flows
1500 flows = []
1501 for i in range( 1, 29 ):
1502 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1503 if flowCheck == main.FALSE:
1504 for table in flows:
1505 main.log.warn( table )
1506 # TODO: Compare switch flow tables with ONOS flow tables
1507
1508 main.step( "Start continuous pings" )
1509 main.Mininet2.pingLong(
1510 src=main.params[ 'PING' ][ 'source1' ],
1511 target=main.params[ 'PING' ][ 'target1' ],
1512 pingTime=500 )
1513 main.Mininet2.pingLong(
1514 src=main.params[ 'PING' ][ 'source2' ],
1515 target=main.params[ 'PING' ][ 'target2' ],
1516 pingTime=500 )
1517 main.Mininet2.pingLong(
1518 src=main.params[ 'PING' ][ 'source3' ],
1519 target=main.params[ 'PING' ][ 'target3' ],
1520 pingTime=500 )
1521 main.Mininet2.pingLong(
1522 src=main.params[ 'PING' ][ 'source4' ],
1523 target=main.params[ 'PING' ][ 'target4' ],
1524 pingTime=500 )
1525 main.Mininet2.pingLong(
1526 src=main.params[ 'PING' ][ 'source5' ],
1527 target=main.params[ 'PING' ][ 'target5' ],
1528 pingTime=500 )
1529 main.Mininet2.pingLong(
1530 src=main.params[ 'PING' ][ 'source6' ],
1531 target=main.params[ 'PING' ][ 'target6' ],
1532 pingTime=500 )
1533 main.Mininet2.pingLong(
1534 src=main.params[ 'PING' ][ 'source7' ],
1535 target=main.params[ 'PING' ][ 'target7' ],
1536 pingTime=500 )
1537 main.Mininet2.pingLong(
1538 src=main.params[ 'PING' ][ 'source8' ],
1539 target=main.params[ 'PING' ][ 'target8' ],
1540 pingTime=500 )
1541 main.Mininet2.pingLong(
1542 src=main.params[ 'PING' ][ 'source9' ],
1543 target=main.params[ 'PING' ][ 'target9' ],
1544 pingTime=500 )
1545 main.Mininet2.pingLong(
1546 src=main.params[ 'PING' ][ 'source10' ],
1547 target=main.params[ 'PING' ][ 'target10' ],
1548 pingTime=500 )
1549
1550 main.step( "Collecting topology information from ONOS" )
1551 devices = []
1552 threads = []
1553 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001554 t = main.Thread( target=main.CLIs[ i ].devices,
Jon Hall6e709752016-02-01 13:38:46 -08001555 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001556 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001557 threads.append( t )
1558 t.start()
1559
1560 for t in threads:
1561 t.join()
1562 devices.append( t.result )
1563 hosts = []
1564 threads = []
1565 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001566 t = main.Thread( target=main.CLIs[ i ].hosts,
Jon Hall6e709752016-02-01 13:38:46 -08001567 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001568 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001569 threads.append( t )
1570 t.start()
1571
1572 for t in threads:
1573 t.join()
1574 try:
1575 hosts.append( json.loads( t.result ) )
1576 except ( ValueError, TypeError ):
1577 # FIXME: better handling of this, print which node
1578 # Maybe use thread name?
1579 main.log.exception( "Error parsing json output of hosts" )
1580 main.log.warn( repr( t.result ) )
1581 hosts.append( None )
1582
1583 ports = []
1584 threads = []
1585 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001586 t = main.Thread( target=main.CLIs[ i ].ports,
Jon Hall6e709752016-02-01 13:38:46 -08001587 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001588 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001589 threads.append( t )
1590 t.start()
1591
1592 for t in threads:
1593 t.join()
1594 ports.append( t.result )
1595 links = []
1596 threads = []
1597 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001598 t = main.Thread( target=main.CLIs[ i ].links,
Jon Hall6e709752016-02-01 13:38:46 -08001599 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001600 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001601 threads.append( t )
1602 t.start()
1603
1604 for t in threads:
1605 t.join()
1606 links.append( t.result )
1607 clusters = []
1608 threads = []
1609 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001610 t = main.Thread( target=main.CLIs[ i ].clusters,
Jon Hall6e709752016-02-01 13:38:46 -08001611 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001612 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001613 threads.append( t )
1614 t.start()
1615
1616 for t in threads:
1617 t.join()
1618 clusters.append( t.result )
1619 # Compare json objects for hosts and dataplane clusters
1620
1621 # hosts
1622 main.step( "Host view is consistent across ONOS nodes" )
1623 consistentHostsResult = main.TRUE
1624 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001625 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001626 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1627 if hosts[ controller ] == hosts[ 0 ]:
1628 continue
1629 else: # hosts not consistent
1630 main.log.error( "hosts from ONOS" +
1631 controllerStr +
1632 " is inconsistent with ONOS1" )
1633 main.log.warn( repr( hosts[ controller ] ) )
1634 consistentHostsResult = main.FALSE
1635
1636 else:
1637 main.log.error( "Error in getting ONOS hosts from ONOS" +
1638 controllerStr )
1639 consistentHostsResult = main.FALSE
1640 main.log.warn( "ONOS" + controllerStr +
1641 " hosts response: " +
1642 repr( hosts[ controller ] ) )
1643 utilities.assert_equals(
1644 expect=main.TRUE,
1645 actual=consistentHostsResult,
1646 onpass="Hosts view is consistent across all ONOS nodes",
1647 onfail="ONOS nodes have different views of hosts" )
1648
1649 main.step( "Each host has an IP address" )
1650 ipResult = main.TRUE
1651 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001652 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001653 if hosts[ controller ]:
1654 for host in hosts[ controller ]:
Jon Hallf37d44d2017-05-24 10:37:30 -07001655 if not host.get( 'ipAddresses', [] ):
Jon Hall6e709752016-02-01 13:38:46 -08001656 main.log.error( "Error with host ips on controller" +
1657 controllerStr + ": " + str( host ) )
1658 ipResult = main.FALSE
1659 utilities.assert_equals(
1660 expect=main.TRUE,
1661 actual=ipResult,
1662 onpass="The ips of the hosts aren't empty",
1663 onfail="The ip of at least one host is missing" )
1664
1665 # Strongly connected clusters of devices
1666 main.step( "Cluster view is consistent across ONOS nodes" )
1667 consistentClustersResult = main.TRUE
1668 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001669 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001670 if "Error" not in clusters[ controller ]:
1671 if clusters[ controller ] == clusters[ 0 ]:
1672 continue
1673 else: # clusters not consistent
1674 main.log.error( "clusters from ONOS" + controllerStr +
1675 " is inconsistent with ONOS1" )
1676 consistentClustersResult = main.FALSE
1677
1678 else:
1679 main.log.error( "Error in getting dataplane clusters " +
1680 "from ONOS" + controllerStr )
1681 consistentClustersResult = main.FALSE
1682 main.log.warn( "ONOS" + controllerStr +
1683 " clusters response: " +
1684 repr( clusters[ controller ] ) )
1685 utilities.assert_equals(
1686 expect=main.TRUE,
1687 actual=consistentClustersResult,
1688 onpass="Clusters view is consistent across all ONOS nodes",
1689 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001690 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001691 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001692
Jon Hall6e709752016-02-01 13:38:46 -08001693 # there should always only be one cluster
1694 main.step( "Cluster view correct across ONOS nodes" )
1695 try:
1696 numClusters = len( json.loads( clusters[ 0 ] ) )
1697 except ( ValueError, TypeError ):
1698 main.log.exception( "Error parsing clusters[0]: " +
1699 repr( clusters[ 0 ] ) )
1700 numClusters = "ERROR"
1701 clusterResults = main.FALSE
1702 if numClusters == 1:
1703 clusterResults = main.TRUE
1704 utilities.assert_equals(
1705 expect=1,
1706 actual=numClusters,
1707 onpass="ONOS shows 1 SCC",
1708 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1709
1710 main.step( "Comparing ONOS topology to MN" )
1711 devicesResults = main.TRUE
1712 linksResults = main.TRUE
1713 hostsResults = main.TRUE
1714 mnSwitches = main.Mininet1.getSwitches()
1715 mnLinks = main.Mininet1.getLinks()
1716 mnHosts = main.Mininet1.getHosts()
1717 for controller in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001718 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001719 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07001720 "Error" not in devices[ controller ] and\
1721 "Error" not in ports[ controller ]:
1722 currentDevicesResult = main.Mininet1.compareSwitches(
1723 mnSwitches,
1724 json.loads( devices[ controller ] ),
1725 json.loads( ports[ controller ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001726 else:
1727 currentDevicesResult = main.FALSE
1728 utilities.assert_equals( expect=main.TRUE,
1729 actual=currentDevicesResult,
1730 onpass="ONOS" + controllerStr +
1731 " Switches view is correct",
1732 onfail="ONOS" + controllerStr +
1733 " Switches view is incorrect" )
1734 if links[ controller ] and "Error" not in links[ controller ]:
1735 currentLinksResult = main.Mininet1.compareLinks(
1736 mnSwitches, mnLinks,
1737 json.loads( links[ controller ] ) )
1738 else:
1739 currentLinksResult = main.FALSE
1740 utilities.assert_equals( expect=main.TRUE,
1741 actual=currentLinksResult,
1742 onpass="ONOS" + controllerStr +
1743 " links view is correct",
1744 onfail="ONOS" + controllerStr +
1745 " links view is incorrect" )
1746
1747 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1748 currentHostsResult = main.Mininet1.compareHosts(
1749 mnHosts,
1750 hosts[ controller ] )
1751 else:
1752 currentHostsResult = main.FALSE
1753 utilities.assert_equals( expect=main.TRUE,
1754 actual=currentHostsResult,
1755 onpass="ONOS" + controllerStr +
1756 " hosts exist in Mininet",
1757 onfail="ONOS" + controllerStr +
1758 " hosts don't match Mininet" )
1759
1760 devicesResults = devicesResults and currentDevicesResult
1761 linksResults = linksResults and currentLinksResult
1762 hostsResults = hostsResults and currentHostsResult
1763
1764 main.step( "Device information is correct" )
1765 utilities.assert_equals(
1766 expect=main.TRUE,
1767 actual=devicesResults,
1768 onpass="Device information is correct",
1769 onfail="Device information is incorrect" )
1770
1771 main.step( "Links are correct" )
1772 utilities.assert_equals(
1773 expect=main.TRUE,
1774 actual=linksResults,
1775 onpass="Link are correct",
1776 onfail="Links are incorrect" )
1777
1778 main.step( "Hosts are correct" )
1779 utilities.assert_equals(
1780 expect=main.TRUE,
1781 actual=hostsResults,
1782 onpass="Hosts are correct",
1783 onfail="Hosts are incorrect" )
1784
1785 def CASE61( self, main ):
1786 """
1787 The Failure case.
1788 """
1789 import math
1790 assert main.numCtrls, "main.numCtrls not defined"
1791 assert main, "main not defined"
1792 assert utilities.assert_equals, "utilities.assert_equals not defined"
1793 assert main.CLIs, "main.CLIs not defined"
1794 assert main.nodes, "main.nodes not defined"
1795 main.case( "Partition ONOS nodes into two distinct partitions" )
1796
1797 main.step( "Checking ONOS Logs for errors" )
1798 for node in main.nodes:
1799 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1800 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1801
Jon Hallf37d44d2017-05-24 10:37:30 -07001802 main.log.debug( main.CLIs[ 0 ].roles( jsonFormat=False ) )
Jon Halld2871c22016-07-26 11:01:14 -07001803
Jon Hall6e709752016-02-01 13:38:46 -08001804 n = len( main.nodes ) # Number of nodes
1805 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1806 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1807 if n > 3:
1808 main.partition.append( p - 1 )
1809 # NOTE: This only works for cluster sizes of 3,5, or 7.
1810
1811 main.step( "Partitioning ONOS nodes" )
1812 nodeList = [ str( i + 1 ) for i in main.partition ]
1813 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1814 partitionResults = main.TRUE
1815 for i in range( 0, n ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001816 this = main.nodes[ i ]
Jon Hall6e709752016-02-01 13:38:46 -08001817 if i not in main.partition:
1818 for j in main.partition:
Jon Hallf37d44d2017-05-24 10:37:30 -07001819 foe = main.nodes[ j ]
Jon Hall6e709752016-02-01 13:38:46 -08001820 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1821 #CMD HERE
1822 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1823 this.handle.sendline( cmdStr )
1824 this.handle.expect( "\$" )
1825 main.log.debug( this.handle.before )
1826 else:
1827 for j in range( 0, n ):
1828 if j not in main.partition:
Jon Hallf37d44d2017-05-24 10:37:30 -07001829 foe = main.nodes[ j ]
Jon Hall6e709752016-02-01 13:38:46 -08001830 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1831 #CMD HERE
1832 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1833 this.handle.sendline( cmdStr )
1834 this.handle.expect( "\$" )
1835 main.log.debug( this.handle.before )
1836 main.activeNodes.remove( i )
1837 # NOTE: When dynamic clustering is finished, we need to start checking
1838 # main.partion nodes still work when partitioned
1839 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1840 onpass="Firewall rules set successfully",
1841 onfail="Error setting firewall rules" )
1842
Jon Hall6509dbf2016-06-21 17:01:17 -07001843 main.step( "Sleeping 60 seconds" )
Jon Hall6e709752016-02-01 13:38:46 -08001844 time.sleep( 60 )
1845
1846 def CASE62( self, main ):
1847 """
1848 Healing Partition
1849 """
1850 import time
1851 assert main.numCtrls, "main.numCtrls not defined"
1852 assert main, "main not defined"
1853 assert utilities.assert_equals, "utilities.assert_equals not defined"
1854 assert main.CLIs, "main.CLIs not defined"
1855 assert main.nodes, "main.nodes not defined"
1856 assert main.partition, "main.partition not defined"
1857 main.case( "Healing Partition" )
1858
1859 main.step( "Deleteing firewall rules" )
1860 healResults = main.TRUE
1861 for node in main.nodes:
1862 cmdStr = "sudo iptables -F"
1863 node.handle.sendline( cmdStr )
1864 node.handle.expect( "\$" )
1865 main.log.debug( node.handle.before )
1866 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1867 onpass="Firewall rules removed",
1868 onfail="Error removing firewall rules" )
1869
1870 for node in main.partition:
1871 main.activeNodes.append( node )
1872 main.activeNodes.sort()
1873 try:
1874 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1875 "List of active nodes has duplicates, this likely indicates something was run out of order"
1876 except AssertionError:
1877 main.log.exception( "" )
1878 main.cleanup()
1879 main.exit()
1880
Jon Halld2871c22016-07-26 11:01:14 -07001881 main.step( "Checking ONOS nodes" )
1882 nodeResults = utilities.retry( main.HA.nodesCheck,
1883 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07001884 args=[ main.activeNodes ],
Jon Halld2871c22016-07-26 11:01:14 -07001885 sleep=15,
1886 attempts=5 )
1887
1888 utilities.assert_equals( expect=True, actual=nodeResults,
1889 onpass="Nodes check successful",
1890 onfail="Nodes check NOT successful" )
1891
1892 if not nodeResults:
1893 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001894 cli = main.CLIs[ i ]
Jon Halld2871c22016-07-26 11:01:14 -07001895 main.log.debug( "{} components not ACTIVE: \n{}".format(
1896 cli.name,
1897 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1898 main.log.error( "Failed to start ONOS, stopping test" )
1899 main.cleanup()
1900 main.exit()
1901
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, on the remaining active nodes, that: every device has a
        master; device roles and intents are consistent across nodes;
        intents match those saved before the failure; switch flow tables
        are unchanged; and leadership election still works and the leader
        is not a partitioned node.

        NOTE(review): relies on module-level names set by earlier cases —
        'intentState' and 'flows' are expected to come from CASE5; if
        CASE5 did not run, 'intentState' is handled via NameError but
        'flows' is not — TODO confirm.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is set by CASE61; default to an empty list so this
        # case can also run when no partition was induced.
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query each active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        # NOTE(review): mastershipCheck is assigned here but never used in
        # this case — presumably leftover; confirm before removing.
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # A falsy or error-containing response from any node fails the step.
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All responses must be identical to the first node's response.
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, pretty-print each node's roles for debugging.
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare character-sorted responses so ordering differences in the
        # JSON strings do not count as inconsistency.
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states, e.g. {'INSTALLED': 25}.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Pretty-print every node's intents for debugging.
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                # Exact string match with the pre-failure snapshot.
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length: fall back to per-intent membership comparison.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Dump both snapshots to aid debugging the difference.
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # NOTE(review): hard-coded 28 switches and the 'flows' snapshot are
        # topology/CASE5 specific — presumably the obelisk topology; verify.
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the background pings started earlier in the test.
        main.Mininet2.pingLongKill()
        """
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        """
        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the partitioned nodes; a leader among them is an error.
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[ i ].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[ i ]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2223
2224 def CASE8( self, main ):
2225 """
2226 Compare topo
2227 """
2228 import json
2229 import time
2230 assert main.numCtrls, "main.numCtrls not defined"
2231 assert main, "main not defined"
2232 assert utilities.assert_equals, "utilities.assert_equals not defined"
2233 assert main.CLIs, "main.CLIs not defined"
2234 assert main.nodes, "main.nodes not defined"
2235
2236 main.case( "Compare ONOS Topology view to Mininet topology" )
2237 main.caseExplanation = "Compare topology objects between Mininet" +\
2238 " and ONOS"
2239 topoResult = main.FALSE
2240 topoFailMsg = "ONOS topology don't match Mininet"
2241 elapsed = 0
2242 count = 0
2243 main.step( "Comparing ONOS topology to MN topology" )
2244 startTime = time.time()
2245 # Give time for Gossip to work
2246 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2247 devicesResults = main.TRUE
2248 linksResults = main.TRUE
2249 hostsResults = main.TRUE
2250 hostAttachmentResults = True
2251 count += 1
2252 cliStart = time.time()
2253 devices = []
2254 threads = []
2255 for i in main.activeNodes:
2256 t = main.Thread( target=utilities.retry,
2257 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002258 args=[ main.CLIs[ i ].devices, [ None ] ],
2259 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002260 'randomTime': True } )
2261 threads.append( t )
2262 t.start()
2263
2264 for t in threads:
2265 t.join()
2266 devices.append( t.result )
2267 hosts = []
2268 ipResult = main.TRUE
2269 threads = []
2270 for i in main.activeNodes:
2271 t = main.Thread( target=utilities.retry,
2272 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002273 args=[ main.CLIs[ i ].hosts, [ None ] ],
2274 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002275 'randomTime': True } )
2276 threads.append( t )
2277 t.start()
2278
2279 for t in threads:
2280 t.join()
2281 try:
2282 hosts.append( json.loads( t.result ) )
2283 except ( ValueError, TypeError ):
2284 main.log.exception( "Error parsing hosts results" )
2285 main.log.error( repr( t.result ) )
2286 hosts.append( None )
2287 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002288 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002289 if hosts[ controller ]:
2290 for host in hosts[ controller ]:
2291 if host is None or host.get( 'ipAddresses', [] ) == []:
2292 main.log.error(
2293 "Error with host ipAddresses on controller" +
2294 controllerStr + ": " + str( host ) )
2295 ipResult = main.FALSE
2296 ports = []
2297 threads = []
2298 for i in main.activeNodes:
2299 t = main.Thread( target=utilities.retry,
2300 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002301 args=[ main.CLIs[ i ].ports, [ None ] ],
2302 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002303 'randomTime': True } )
2304 threads.append( t )
2305 t.start()
2306
2307 for t in threads:
2308 t.join()
2309 ports.append( t.result )
2310 links = []
2311 threads = []
2312 for i in main.activeNodes:
2313 t = main.Thread( target=utilities.retry,
2314 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002315 args=[ main.CLIs[ i ].links, [ None ] ],
2316 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002317 'randomTime': True } )
2318 threads.append( t )
2319 t.start()
2320
2321 for t in threads:
2322 t.join()
2323 links.append( t.result )
2324 clusters = []
2325 threads = []
2326 for i in main.activeNodes:
2327 t = main.Thread( target=utilities.retry,
2328 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002329 args=[ main.CLIs[ i ].clusters, [ None ] ],
2330 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002331 'randomTime': True } )
2332 threads.append( t )
2333 t.start()
2334
2335 for t in threads:
2336 t.join()
2337 clusters.append( t.result )
2338
2339 elapsed = time.time() - startTime
2340 cliTime = time.time() - cliStart
2341 print "Elapsed time: " + str( elapsed )
2342 print "CLI time: " + str( cliTime )
2343
2344 if all( e is None for e in devices ) and\
2345 all( e is None for e in hosts ) and\
2346 all( e is None for e in ports ) and\
2347 all( e is None for e in links ) and\
2348 all( e is None for e in clusters ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002349 topoFailMsg = "Could not get topology from ONOS"
2350 main.log.error( topoFailMsg )
2351 continue # Try again, No use trying to compare
Jon Hall6e709752016-02-01 13:38:46 -08002352
2353 mnSwitches = main.Mininet1.getSwitches()
2354 mnLinks = main.Mininet1.getLinks()
2355 mnHosts = main.Mininet1.getHosts()
2356 for controller in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002357 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002358 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07002359 "Error" not in devices[ controller ] and\
2360 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08002361
2362 try:
2363 currentDevicesResult = main.Mininet1.compareSwitches(
2364 mnSwitches,
2365 json.loads( devices[ controller ] ),
2366 json.loads( ports[ controller ] ) )
2367 except ( TypeError, ValueError ) as e:
2368 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2369 devices[ controller ], ports[ controller ] ) )
2370 else:
2371 currentDevicesResult = main.FALSE
2372 utilities.assert_equals( expect=main.TRUE,
2373 actual=currentDevicesResult,
2374 onpass="ONOS" + controllerStr +
2375 " Switches view is correct",
2376 onfail="ONOS" + controllerStr +
2377 " Switches view is incorrect" )
2378
2379 if links[ controller ] and "Error" not in links[ controller ]:
2380 currentLinksResult = main.Mininet1.compareLinks(
2381 mnSwitches, mnLinks,
2382 json.loads( links[ controller ] ) )
2383 else:
2384 currentLinksResult = main.FALSE
2385 utilities.assert_equals( expect=main.TRUE,
2386 actual=currentLinksResult,
2387 onpass="ONOS" + controllerStr +
2388 " links view is correct",
2389 onfail="ONOS" + controllerStr +
2390 " links view is incorrect" )
2391 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2392 currentHostsResult = main.Mininet1.compareHosts(
2393 mnHosts,
2394 hosts[ controller ] )
2395 elif hosts[ controller ] == []:
2396 currentHostsResult = main.TRUE
2397 else:
2398 currentHostsResult = main.FALSE
2399 utilities.assert_equals( expect=main.TRUE,
2400 actual=currentHostsResult,
2401 onpass="ONOS" + controllerStr +
2402 " hosts exist in Mininet",
2403 onfail="ONOS" + controllerStr +
2404 " hosts don't match Mininet" )
2405 # CHECKING HOST ATTACHMENT POINTS
2406 hostAttachment = True
2407 zeroHosts = False
2408 # FIXME: topo-HA/obelisk specific mappings:
2409 # key is mac and value is dpid
2410 mappings = {}
2411 for i in range( 1, 29 ): # hosts 1 through 28
2412 # set up correct variables:
Jon Hallf37d44d2017-05-24 10:37:30 -07002413 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
Jon Hall6e709752016-02-01 13:38:46 -08002414 if i == 1:
Jon Hallf37d44d2017-05-24 10:37:30 -07002415 deviceId = "1000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002416 elif i == 2:
Jon Hallf37d44d2017-05-24 10:37:30 -07002417 deviceId = "2000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002418 elif i == 3:
Jon Hallf37d44d2017-05-24 10:37:30 -07002419 deviceId = "3000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002420 elif i == 4:
Jon Hallf37d44d2017-05-24 10:37:30 -07002421 deviceId = "3004".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002422 elif i == 5:
Jon Hallf37d44d2017-05-24 10:37:30 -07002423 deviceId = "5000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002424 elif i == 6:
Jon Hallf37d44d2017-05-24 10:37:30 -07002425 deviceId = "6000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002426 elif i == 7:
Jon Hallf37d44d2017-05-24 10:37:30 -07002427 deviceId = "6007".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002428 elif i >= 8 and i <= 17:
2429 dpid = '3' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002430 deviceId = dpid.zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002431 elif i >= 18 and i <= 27:
2432 dpid = '6' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002433 deviceId = dpid.zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002434 elif i == 28:
Jon Hallf37d44d2017-05-24 10:37:30 -07002435 deviceId = "2800".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002436 mappings[ macId ] = deviceId
2437 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2438 if hosts[ controller ] == []:
2439 main.log.warn( "There are no hosts discovered" )
2440 zeroHosts = True
2441 else:
2442 for host in hosts[ controller ]:
2443 mac = None
2444 location = None
2445 device = None
2446 port = None
2447 try:
2448 mac = host.get( 'mac' )
2449 assert mac, "mac field could not be found for this host object"
2450
2451 location = host.get( 'location' )
2452 assert location, "location field could not be found for this host object"
2453
2454 # Trim the protocol identifier off deviceId
Jon Hallf37d44d2017-05-24 10:37:30 -07002455 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
Jon Hall6e709752016-02-01 13:38:46 -08002456 assert device, "elementId field could not be found for this host location object"
2457
2458 port = location.get( 'port' )
2459 assert port, "port field could not be found for this host location object"
2460
2461 # Now check if this matches where they should be
2462 if mac and device and port:
2463 if str( port ) != "1":
2464 main.log.error( "The attachment port is incorrect for " +
2465 "host " + str( mac ) +
Jon Hallf37d44d2017-05-24 10:37:30 -07002466 ". Expected: 1 Actual: " + str( port ) )
Jon Hall6e709752016-02-01 13:38:46 -08002467 hostAttachment = False
2468 if device != mappings[ str( mac ) ]:
2469 main.log.error( "The attachment device is incorrect for " +
2470 "host " + str( mac ) +
2471 ". Expected: " + mappings[ str( mac ) ] +
2472 " Actual: " + device )
2473 hostAttachment = False
2474 else:
2475 hostAttachment = False
2476 except AssertionError:
2477 main.log.exception( "Json object not as expected" )
2478 main.log.error( repr( host ) )
2479 hostAttachment = False
2480 else:
2481 main.log.error( "No hosts json output or \"Error\"" +
2482 " in output. hosts = " +
2483 repr( hosts[ controller ] ) )
2484 if zeroHosts is False:
2485 hostAttachment = True
2486
2487 # END CHECKING HOST ATTACHMENT POINTS
2488 devicesResults = devicesResults and currentDevicesResult
2489 linksResults = linksResults and currentLinksResult
2490 hostsResults = hostsResults and currentHostsResult
2491 hostAttachmentResults = hostAttachmentResults and\
2492 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002493 topoResult = ( devicesResults and linksResults
2494 and hostsResults and ipResult and
2495 hostAttachmentResults )
Jon Hall6e709752016-02-01 13:38:46 -08002496 utilities.assert_equals( expect=True,
2497 actual=topoResult,
2498 onpass="ONOS topology matches Mininet",
2499 onfail=topoFailMsg )
2500 # End of While loop to pull ONOS state
2501
2502 # Compare json objects for hosts and dataplane clusters
2503
2504 # hosts
2505 main.step( "Hosts view is consistent across all ONOS nodes" )
2506 consistentHostsResult = main.TRUE
2507 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002508 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002509 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2510 if hosts[ controller ] == hosts[ 0 ]:
2511 continue
2512 else: # hosts not consistent
2513 main.log.error( "hosts from ONOS" + controllerStr +
2514 " is inconsistent with ONOS1" )
2515 main.log.warn( repr( hosts[ controller ] ) )
2516 consistentHostsResult = main.FALSE
2517
2518 else:
2519 main.log.error( "Error in getting ONOS hosts from ONOS" +
2520 controllerStr )
2521 consistentHostsResult = main.FALSE
2522 main.log.warn( "ONOS" + controllerStr +
2523 " hosts response: " +
2524 repr( hosts[ controller ] ) )
2525 utilities.assert_equals(
2526 expect=main.TRUE,
2527 actual=consistentHostsResult,
2528 onpass="Hosts view is consistent across all ONOS nodes",
2529 onfail="ONOS nodes have different views of hosts" )
2530
2531 main.step( "Hosts information is correct" )
2532 hostsResults = hostsResults and ipResult
2533 utilities.assert_equals(
2534 expect=main.TRUE,
2535 actual=hostsResults,
2536 onpass="Host information is correct",
2537 onfail="Host information is incorrect" )
2538
2539 main.step( "Host attachment points to the network" )
2540 utilities.assert_equals(
2541 expect=True,
2542 actual=hostAttachmentResults,
2543 onpass="Hosts are correctly attached to the network",
2544 onfail="ONOS did not correctly attach hosts to the network" )
2545
2546 # Strongly connected clusters of devices
2547 main.step( "Clusters view is consistent across all ONOS nodes" )
2548 consistentClustersResult = main.TRUE
2549 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002550 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002551 if "Error" not in clusters[ controller ]:
2552 if clusters[ controller ] == clusters[ 0 ]:
2553 continue
2554 else: # clusters not consistent
2555 main.log.error( "clusters from ONOS" +
2556 controllerStr +
2557 " is inconsistent with ONOS1" )
2558 consistentClustersResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002559 else:
2560 main.log.error( "Error in getting dataplane clusters " +
2561 "from ONOS" + controllerStr )
2562 consistentClustersResult = main.FALSE
2563 main.log.warn( "ONOS" + controllerStr +
2564 " clusters response: " +
2565 repr( clusters[ controller ] ) )
2566 utilities.assert_equals(
2567 expect=main.TRUE,
2568 actual=consistentClustersResult,
2569 onpass="Clusters view is consistent across all ONOS nodes",
2570 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002571 if not consistentClustersResult:
2572 main.log.debug( clusters )
Jon Hall6e709752016-02-01 13:38:46 -08002573
2574 main.step( "There is only one SCC" )
2575 # there should always only be one cluster
2576 try:
2577 numClusters = len( json.loads( clusters[ 0 ] ) )
2578 except ( ValueError, TypeError ):
2579 main.log.exception( "Error parsing clusters[0]: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07002580 repr( clusters[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -07002581 numClusters = "ERROR"
Jon Hall6e709752016-02-01 13:38:46 -08002582 clusterResults = main.FALSE
2583 if numClusters == 1:
2584 clusterResults = main.TRUE
2585 utilities.assert_equals(
2586 expect=1,
2587 actual=numClusters,
2588 onpass="ONOS shows 1 SCC",
2589 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2590
2591 topoResult = ( devicesResults and linksResults
2592 and hostsResults and consistentHostsResult
2593 and consistentClustersResult and clusterResults
2594 and ipResult and hostAttachmentResults )
2595
2596 topoResult = topoResult and int( count <= 2 )
2597 note = "note it takes about " + str( int( cliTime ) ) + \
2598 " seconds for the test to make all the cli calls to fetch " +\
2599 "the topology from each ONOS instance"
2600 main.log.info(
2601 "Very crass estimate for topology discovery/convergence( " +
2602 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2603 str( count ) + " tries" )
2604
2605 main.step( "Device information is correct" )
2606 utilities.assert_equals(
2607 expect=main.TRUE,
2608 actual=devicesResults,
2609 onpass="Device information is correct",
2610 onfail="Device information is incorrect" )
2611
2612 main.step( "Links are correct" )
2613 utilities.assert_equals(
2614 expect=main.TRUE,
2615 actual=linksResults,
2616 onpass="Link are correct",
2617 onfail="Links are incorrect" )
2618
Jon Halla440e872016-03-31 15:15:50 -07002619 main.step( "Hosts are correct" )
2620 utilities.assert_equals(
2621 expect=main.TRUE,
2622 actual=hostsResults,
2623 onpass="Hosts are correct",
2624 onfail="Hosts are incorrect" )
2625
Jon Hall6e709752016-02-01 13:38:46 -08002626 # FIXME: move this to an ONOS state case
2627 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002628 nodeResults = utilities.retry( main.HA.nodesCheck,
2629 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07002630 args=[ main.activeNodes ],
Jon Hall41d39f12016-04-11 22:54:35 -07002631 attempts=5 )
Jon Hall6e709752016-02-01 13:38:46 -08002632
Jon Hall41d39f12016-04-11 22:54:35 -07002633 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall6e709752016-02-01 13:38:46 -08002634 onpass="Nodes check successful",
2635 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002636 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002637 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002638 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallf37d44d2017-05-24 10:37:30 -07002639 main.CLIs[ i ].name,
2640 main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002641
Jon Halld2871c22016-07-26 11:01:14 -07002642 if not topoResult:
2643 main.cleanup()
2644 main.exit()
2645
Jon Hall6e709752016-02-01 13:38:46 -08002646 def CASE9( self, main ):
2647 """
2648 Link s3-s28 down
2649 """
2650 import time
2651 assert main.numCtrls, "main.numCtrls not defined"
2652 assert main, "main not defined"
2653 assert utilities.assert_equals, "utilities.assert_equals not defined"
2654 assert main.CLIs, "main.CLIs not defined"
2655 assert main.nodes, "main.nodes not defined"
2656 # NOTE: You should probably run a topology check after this
2657
2658 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2659
2660 description = "Turn off a link to ensure that Link Discovery " +\
2661 "is working properly"
2662 main.case( description )
2663
2664 main.step( "Kill Link between s3 and s28" )
2665 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2666 main.log.info( "Waiting " + str( linkSleep ) +
2667 " seconds for link down to be discovered" )
2668 time.sleep( linkSleep )
2669 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2670 onpass="Link down successful",
2671 onfail="Failed to bring link down" )
2672 # TODO do some sort of check here
2673
2674 def CASE10( self, main ):
2675 """
2676 Link s3-s28 up
2677 """
2678 import time
2679 assert main.numCtrls, "main.numCtrls not defined"
2680 assert main, "main not defined"
2681 assert utilities.assert_equals, "utilities.assert_equals not defined"
2682 assert main.CLIs, "main.CLIs not defined"
2683 assert main.nodes, "main.nodes not defined"
2684 # NOTE: You should probably run a topology check after this
2685
2686 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2687
2688 description = "Restore a link to ensure that Link Discovery is " + \
2689 "working properly"
2690 main.case( description )
2691
2692 main.step( "Bring link between s3 and s28 back up" )
2693 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2694 main.log.info( "Waiting " + str( linkSleep ) +
2695 " seconds for link up to be discovered" )
2696 time.sleep( linkSleep )
2697 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2698 onpass="Link up successful",
2699 onfail="Failed to bring link up" )
2700 # TODO do some sort of check here
2701
2702 def CASE11( self, main ):
2703 """
2704 Switch Down
2705 """
2706 # NOTE: You should probably run a topology check after this
2707 import time
2708 assert main.numCtrls, "main.numCtrls not defined"
2709 assert main, "main not defined"
2710 assert utilities.assert_equals, "utilities.assert_equals not defined"
2711 assert main.CLIs, "main.CLIs not defined"
2712 assert main.nodes, "main.nodes not defined"
2713
2714 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2715
2716 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallf37d44d2017-05-24 10:37:30 -07002717 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -08002718 main.case( description )
2719 switch = main.params[ 'kill' ][ 'switch' ]
2720 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2721
2722 # TODO: Make this switch parameterizable
2723 main.step( "Kill " + switch )
2724 main.log.info( "Deleting " + switch )
2725 main.Mininet1.delSwitch( switch )
2726 main.log.info( "Waiting " + str( switchSleep ) +
2727 " seconds for switch down to be discovered" )
2728 time.sleep( switchSleep )
2729 device = onosCli.getDevice( dpid=switchDPID )
2730 # Peek at the deleted switch
2731 main.log.warn( str( device ) )
2732 result = main.FALSE
2733 if device and device[ 'available' ] is False:
2734 result = main.TRUE
2735 utilities.assert_equals( expect=main.TRUE, actual=result,
2736 onpass="Kill switch successful",
2737 onfail="Failed to kill switch?" )
2738
2739 def CASE12( self, main ):
2740 """
2741 Switch Up
2742 """
2743 # NOTE: You should probably run a topology check after this
2744 import time
2745 assert main.numCtrls, "main.numCtrls not defined"
2746 assert main, "main not defined"
2747 assert utilities.assert_equals, "utilities.assert_equals not defined"
2748 assert main.CLIs, "main.CLIs not defined"
2749 assert main.nodes, "main.nodes not defined"
2750 assert ONOS1Port, "ONOS1Port not defined"
2751 assert ONOS2Port, "ONOS2Port not defined"
2752 assert ONOS3Port, "ONOS3Port not defined"
2753 assert ONOS4Port, "ONOS4Port not defined"
2754 assert ONOS5Port, "ONOS5Port not defined"
2755 assert ONOS6Port, "ONOS6Port not defined"
2756 assert ONOS7Port, "ONOS7Port not defined"
2757
2758 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2759 switch = main.params[ 'kill' ][ 'switch' ]
2760 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2761 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallf37d44d2017-05-24 10:37:30 -07002762 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -08002763 description = "Adding a switch to ensure it is discovered correctly"
2764 main.case( description )
2765
2766 main.step( "Add back " + switch )
2767 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2768 for peer in links:
2769 main.Mininet1.addLink( switch, peer )
2770 ipList = [ node.ip_address for node in main.nodes ]
2771 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2772 main.log.info( "Waiting " + str( switchSleep ) +
2773 " seconds for switch up to be discovered" )
2774 time.sleep( switchSleep )
2775 device = onosCli.getDevice( dpid=switchDPID )
2776 # Peek at the deleted switch
2777 main.log.warn( str( device ) )
2778 result = main.FALSE
2779 if device and device[ 'available' ]:
2780 result = main.TRUE
2781 utilities.assert_equals( expect=main.TRUE, actual=result,
2782 onpass="add switch successful",
2783 onfail="Failed to add switch?" )
2784
2785 def CASE13( self, main ):
2786 """
2787 Clean up
2788 """
2789 import os
2790 import time
2791 assert main.numCtrls, "main.numCtrls not defined"
2792 assert main, "main not defined"
2793 assert utilities.assert_equals, "utilities.assert_equals not defined"
2794 assert main.CLIs, "main.CLIs not defined"
2795 assert main.nodes, "main.nodes not defined"
2796
2797 # printing colors to terminal
2798 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2799 'blue': '\033[94m', 'green': '\033[92m',
2800 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2801 main.case( "Test Cleanup" )
2802 main.step( "Killing tcpdumps" )
2803 main.Mininet2.stopTcpdump()
2804
2805 testname = main.TEST
2806 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2807 main.step( "Copying MN pcap and ONOS log files to test station" )
2808 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2809 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2810 # NOTE: MN Pcap file is being saved to logdir.
2811 # We scp this file as MN and TestON aren't necessarily the same vm
2812
2813 # FIXME: To be replaced with a Jenkin's post script
2814 # TODO: Load these from params
2815 # NOTE: must end in /
2816 logFolder = "/opt/onos/log/"
2817 logFiles = [ "karaf.log", "karaf.log.1" ]
2818 # NOTE: must end in /
2819 for f in logFiles:
2820 for node in main.nodes:
2821 dstName = main.logdir + "/" + node.name + "-" + f
2822 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2823 logFolder + f, dstName )
2824 # std*.log's
2825 # NOTE: must end in /
2826 logFolder = "/opt/onos/var/"
2827 logFiles = [ "stderr.log", "stdout.log" ]
2828 # NOTE: must end in /
2829 for f in logFiles:
2830 for node in main.nodes:
2831 dstName = main.logdir + "/" + node.name + "-" + f
2832 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2833 logFolder + f, dstName )
2834 else:
2835 main.log.debug( "skipping saving log files" )
2836
2837 main.step( "Stopping Mininet" )
2838 mnResult = main.Mininet1.stopNet()
2839 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2840 onpass="Mininet stopped",
2841 onfail="MN cleanup NOT successful" )
2842
2843 main.step( "Checking ONOS Logs for errors" )
2844 for node in main.nodes:
2845 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2846 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2847
2848 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07002849 timerLog = open( main.logdir + "/Timers.csv", 'w' )
Jon Hall6e709752016-02-01 13:38:46 -08002850 # Overwrite with empty line and close
2851 labels = "Gossip Intents"
2852 data = str( gossipTime )
2853 timerLog.write( labels + "\n" + data )
2854 timerLog.close()
Jon Hallf37d44d2017-05-24 10:37:30 -07002855 except NameError as e:
2856 main.log.exception( e )
Jon Hall6e709752016-02-01 13:38:46 -08002857
2858 def CASE14( self, main ):
2859 """
2860 start election app on all onos nodes
2861 """
2862 assert main.numCtrls, "main.numCtrls not defined"
2863 assert main, "main not defined"
2864 assert utilities.assert_equals, "utilities.assert_equals not defined"
2865 assert main.CLIs, "main.CLIs not defined"
2866 assert main.nodes, "main.nodes not defined"
2867
Jon Hallf37d44d2017-05-24 10:37:30 -07002868 main.case( "Start Leadership Election app" )
Jon Hall6e709752016-02-01 13:38:46 -08002869 main.step( "Install leadership election app" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002870 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -08002871 appResult = onosCli.activateApp( "org.onosproject.election" )
2872 utilities.assert_equals(
2873 expect=main.TRUE,
2874 actual=appResult,
2875 onpass="Election app installed",
2876 onfail="Something went wrong with installing Leadership election" )
2877
2878 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002879 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002880 main.CLIs[ i ].electionTestRun()
2881 time.sleep( 5 )
2882 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
Jon Hall25463a82016-04-13 14:03:52 -07002883 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002884 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002885 expect=True,
2886 actual=sameResult,
2887 onpass="All nodes see the same leaderboards",
2888 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002889
Jon Hall25463a82016-04-13 14:03:52 -07002890 if sameResult:
2891 leader = leaders[ 0 ][ 0 ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002892 if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
Jon Hall25463a82016-04-13 14:03:52 -07002893 correctLeader = True
2894 else:
2895 correctLeader = False
2896 main.step( "First node was elected leader" )
2897 utilities.assert_equals(
2898 expect=True,
2899 actual=correctLeader,
2900 onpass="Correct leader was elected",
2901 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002902
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional.

        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        "old" and "new" variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before
        newLeaders = []  # list of lists of each node's candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app there is nothing to test here
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        # consistentLeaderboards returns ( allSame, perNodeLeaderboards )
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; entry [ 0 ][ 0 ] is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means leader not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' is only acceptable in the single-controller case
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            # The board is [ leader, candidate1, candidate2, ... ]; with the
            # leader withdrawn, the second candidate should be promoted
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates on the old board to predict a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3076
3077 def CASE16( self, main ):
3078 """
3079 Install Distributed Primitives app
3080 """
3081 import time
3082 assert main.numCtrls, "main.numCtrls not defined"
3083 assert main, "main not defined"
3084 assert utilities.assert_equals, "utilities.assert_equals not defined"
3085 assert main.CLIs, "main.CLIs not defined"
3086 assert main.nodes, "main.nodes not defined"
3087
3088 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003089 main.pCounterName = "TestON-Partitions"
3090 main.pCounterValue = 0
Jon Hallf37d44d2017-05-24 10:37:30 -07003091 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003092 main.onosSetName = "TestON-set"
Jon Hall6e709752016-02-01 13:38:46 -08003093
3094 description = "Install Primitives app"
3095 main.case( description )
3096 main.step( "Install Primitives app" )
3097 appName = "org.onosproject.distributedprimitives"
Jon Hallf37d44d2017-05-24 10:37:30 -07003098 node = main.activeNodes[ 0 ]
3099 appResults = main.CLIs[ node ].activateApp( appName )
Jon Hall6e709752016-02-01 13:38:46 -08003100 utilities.assert_equals( expect=main.TRUE,
3101 actual=appResults,
3102 onpass="Primitives app activated",
3103 onfail="Primitives app not activated" )
3104 time.sleep( 5 ) # To allow all nodes to activate
3105
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives.

        Delegates entirely to the shared HA helper's CASE17.
        """
        # NOTE(review): presumably relies on CASE16 having activated the
        # distributedprimitives app and set main.pCounterName / main.onosSet —
        # confirm the case run order in the .params file.
        main.HA.CASE17( main )