blob: e9e8e1a8d72207f9cd009bc95f99874fd1943bdf [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
3 a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
Jon Hall6e709752016-02-01 13:38:46 -080026class HAfullNetPartition:
27
28 def __init__( self ):
29 self.default = ''
30
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump

        On any unrecoverable failure this case calls main.cleanup() and
        main.exit(), aborting the whole test run.
        """
        # NOTE(review): 'imp' and 'json' do not appear to be used directly in
        # this case — presumably kept for historical reasons; confirm before
        # removing.
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Clamp the requested cluster size to what the bench can support
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        # Shared HA helper functions used by later cases (e.g. nodesCheck)
        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect CLI/node driver handles created by the TestON topology file.
        # Stops at the first missing ONOS<i>/ONOScli<i> attribute, so fewer
        # components than numCtrls silently shrinks the cluster.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList, main.ONOScli1.karafUser )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAfullNetPartition"
        plotName = "Plot-HA"
        index = "2"
        # Confluence wiki markup embedding the Jenkins plot iframe
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        # NOTE(review): direction="from" looks inverted for a copy whose
        # source is the local test dir — confirm against secureCopy's
        # driver semantics.
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        # Revert the custom onos-gen-partitions script in the ONOS checkout
        # via the bench's interactive shell; a pexpect timeout/EOF here is
        # fatal to the run.
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for node in main.nodes:
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Checking if ONOS is up yet" )
        # Poll each node's readiness; retry the whole sweep once more if any
        # node was still down.
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        # Open all karaf CLI sessions in parallel, then join and AND the
        # per-thread results.
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[ main.nodes[ i ].ip_address ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump non-ACTIVE karaf components for debugging, then abort.
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # config is presumably a nested params mapping of
            # component -> { setting: value } — applied via cfg set on node 0
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        # Verify app name<->id consistency on every active node, in parallel
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
355
Jon Hall6e709752016-02-01 13:38:46 -0800356 def CASE2( self, main ):
357 """
358 Assign devices to controllers
359 """
360 import re
361 assert main.numCtrls, "main.numCtrls not defined"
362 assert main, "main not defined"
363 assert utilities.assert_equals, "utilities.assert_equals not defined"
364 assert main.CLIs, "main.CLIs not defined"
365 assert main.nodes, "main.nodes not defined"
366 assert ONOS1Port, "ONOS1Port not defined"
367 assert ONOS2Port, "ONOS2Port not defined"
368 assert ONOS3Port, "ONOS3Port not defined"
369 assert ONOS4Port, "ONOS4Port not defined"
370 assert ONOS5Port, "ONOS5Port not defined"
371 assert ONOS6Port, "ONOS6Port not defined"
372 assert ONOS7Port, "ONOS7Port not defined"
373
374 main.case( "Assigning devices to controllers" )
375 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
376 "and check that an ONOS node becomes the " +\
377 "master of the device."
378 main.step( "Assign switches to controllers" )
379
380 ipList = []
381 for i in range( main.numCtrls ):
382 ipList.append( main.nodes[ i ].ip_address )
383 swList = []
384 for i in range( 1, 29 ):
385 swList.append( "s" + str( i ) )
386 main.Mininet1.assignSwController( sw=swList, ip=ipList )
387
388 mastershipCheck = main.TRUE
389 for i in range( 1, 29 ):
390 response = main.Mininet1.getSwController( "s" + str( i ) )
391 try:
392 main.log.info( str( response ) )
393 except Exception:
394 main.log.info( repr( response ) )
395 for node in main.nodes:
396 if re.search( "tcp:" + node.ip_address, response ):
397 mastershipCheck = mastershipCheck and main.TRUE
398 else:
399 main.log.error( "Error, node " + node.ip_address + " is " +
400 "not in the list of controllers s" +
401 str( i ) + " is connecting to." )
402 mastershipCheck = main.FALSE
403 utilities.assert_equals(
404 expect=main.TRUE,
405 actual=mastershipCheck,
406 onpass="Switch mastership assigned correctly",
407 onfail="Switches not assigned correctly to controllers" )
408
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Uses the ONOS 'device-role' command to pin each of the 28 obelisk
        switches to a designated controller (mapping designed for a 7-node
        cluster, scaled by modulo for smaller ones), then re-reads the
        mastership after a settling delay and verifies the assignment took.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        # All CLI commands in this case go through the first active node
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = target controller index (mod numCtrls for small
                # clusters); the "NNNN" strings are dpid fragments looked up
                # in the ONOS device view.
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() returning None (AttributeError on .get) or a
            # missing device id lands here; dump the device view for triage.
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
530
531 def CASE3( self, main ):
532 """
533 Assign intents
534 """
535 import time
536 import json
537 assert main.numCtrls, "main.numCtrls not defined"
538 assert main, "main not defined"
539 assert utilities.assert_equals, "utilities.assert_equals not defined"
540 assert main.CLIs, "main.CLIs not defined"
541 assert main.nodes, "main.nodes not defined"
542 main.case( "Adding host Intents" )
543 main.caseExplanation = "Discover hosts by using pingall then " +\
544 "assign predetermined host-to-host intents." +\
545 " After installation, check that the intent" +\
546 " is distributed to all nodes and the state" +\
547 " is INSTALLED"
548
549 # install onos-app-fwd
550 main.step( "Install reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700551 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -0800552 installResults = onosCli.activateApp( "org.onosproject.fwd" )
553 utilities.assert_equals( expect=main.TRUE, actual=installResults,
554 onpass="Install fwd successful",
555 onfail="Install fwd failed" )
556
557 main.step( "Check app ids" )
558 appCheck = main.TRUE
559 threads = []
560 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700561 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall6e709752016-02-01 13:38:46 -0800562 name="appToIDCheck-" + str( i ),
563 args=[] )
564 threads.append( t )
565 t.start()
566
567 for t in threads:
568 t.join()
569 appCheck = appCheck and t.result
570 if appCheck != main.TRUE:
571 main.log.warn( onosCli.apps() )
572 main.log.warn( onosCli.appIDs() )
573 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
574 onpass="App Ids seem to be correct",
575 onfail="Something is wrong with app Ids" )
576
577 main.step( "Discovering Hosts( Via pingall for now )" )
578 # FIXME: Once we have a host discovery mechanism, use that instead
579 # REACTIVE FWD test
580 pingResult = main.FALSE
581 passMsg = "Reactive Pingall test passed"
582 time1 = time.time()
583 pingResult = main.Mininet1.pingall()
584 time2 = time.time()
585 if not pingResult:
Jon Hallf37d44d2017-05-24 10:37:30 -0700586 main.log.warn( "First pingall failed. Trying again..." )
Jon Hall6e709752016-02-01 13:38:46 -0800587 pingResult = main.Mininet1.pingall()
588 passMsg += " on the second try"
589 utilities.assert_equals(
590 expect=main.TRUE,
591 actual=pingResult,
Jon Hallf37d44d2017-05-24 10:37:30 -0700592 onpass=passMsg,
Jon Hall6e709752016-02-01 13:38:46 -0800593 onfail="Reactive Pingall failed, " +
594 "one or more ping pairs failed" )
595 main.log.info( "Time for pingall: %2f seconds" %
596 ( time2 - time1 ) )
597 # timeout for fwd flows
598 time.sleep( 11 )
599 # uninstall onos-app-fwd
600 main.step( "Uninstall reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700601 node = main.activeNodes[ 0 ]
602 uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
Jon Hall6e709752016-02-01 13:38:46 -0800603 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
604 onpass="Uninstall fwd successful",
605 onfail="Uninstall fwd failed" )
606
607 main.step( "Check app ids" )
608 threads = []
609 appCheck2 = main.TRUE
610 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700611 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall6e709752016-02-01 13:38:46 -0800612 name="appToIDCheck-" + str( i ),
613 args=[] )
614 threads.append( t )
615 t.start()
616
617 for t in threads:
618 t.join()
619 appCheck2 = appCheck2 and t.result
620 if appCheck2 != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700621 node = main.activeNodes[ 0 ]
622 main.log.warn( main.CLIs[ node ].apps() )
623 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall6e709752016-02-01 13:38:46 -0800624 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
625 onpass="App Ids seem to be correct",
626 onfail="Something is wrong with app Ids" )
627
628 main.step( "Add host intents via cli" )
629 intentIds = []
630 # TODO: move the host numbers to params
631 # Maybe look at all the paths we ping?
632 intentAddResult = True
633 hostResult = main.TRUE
634 for i in range( 8, 18 ):
635 main.log.info( "Adding host intent between h" + str( i ) +
636 " and h" + str( i + 10 ) )
637 host1 = "00:00:00:00:00:" + \
638 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
639 host2 = "00:00:00:00:00:" + \
640 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
641 # NOTE: getHost can return None
642 host1Dict = onosCli.getHost( host1 )
643 host2Dict = onosCli.getHost( host2 )
644 host1Id = None
645 host2Id = None
646 if host1Dict and host2Dict:
647 host1Id = host1Dict.get( 'id', None )
648 host2Id = host2Dict.get( 'id', None )
649 if host1Id and host2Id:
650 nodeNum = ( i % len( main.activeNodes ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700651 node = main.activeNodes[ nodeNum ]
652 tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
Jon Hall6e709752016-02-01 13:38:46 -0800653 if tmpId:
654 main.log.info( "Added intent with id: " + tmpId )
655 intentIds.append( tmpId )
656 else:
657 main.log.error( "addHostIntent returned: " +
658 repr( tmpId ) )
659 else:
660 main.log.error( "Error, getHost() failed for h" + str( i ) +
661 " and/or h" + str( i + 10 ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700662 node = main.activeNodes[ 0 ]
663 hosts = main.CLIs[ node ].hosts()
Jon Hall6e709752016-02-01 13:38:46 -0800664 main.log.warn( "Hosts output: " )
665 try:
666 main.log.warn( json.dumps( json.loads( hosts ),
667 sort_keys=True,
668 indent=4,
669 separators=( ',', ': ' ) ) )
670 except ( ValueError, TypeError ):
671 main.log.warn( repr( hosts ) )
672 hostResult = main.FALSE
673 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
674 onpass="Found a host id for each host",
675 onfail="Error looking up host ids" )
676
677 intentStart = time.time()
678 onosIds = onosCli.getAllIntentsId()
679 main.log.info( "Submitted intents: " + str( intentIds ) )
680 main.log.info( "Intents in ONOS: " + str( onosIds ) )
681 for intent in intentIds:
682 if intent in onosIds:
683 pass # intent submitted is in onos
684 else:
685 intentAddResult = False
686 if intentAddResult:
687 intentStop = time.time()
688 else:
689 intentStop = None
690 # Print the intent states
691 intents = onosCli.intents()
692 intentStates = []
693 installedCheck = True
694 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
695 count = 0
696 try:
697 for intent in json.loads( intents ):
698 state = intent.get( 'state', None )
699 if "INSTALLED" not in state:
700 installedCheck = False
701 intentId = intent.get( 'id', None )
702 intentStates.append( ( intentId, state ) )
703 except ( ValueError, TypeError ):
704 main.log.exception( "Error parsing intents" )
705 # add submitted intents not in the store
706 tmplist = [ i for i, s in intentStates ]
707 missingIntents = False
708 for i in intentIds:
709 if i not in tmplist:
710 intentStates.append( ( i, " - " ) )
711 missingIntents = True
712 intentStates.sort()
713 for i, s in intentStates:
714 count += 1
715 main.log.info( "%-6s%-15s%-15s" %
716 ( str( count ), str( i ), str( s ) ) )
717 leaders = onosCli.leaders()
718 try:
719 missing = False
720 if leaders:
721 parsedLeaders = json.loads( leaders )
722 main.log.warn( json.dumps( parsedLeaders,
723 sort_keys=True,
724 indent=4,
725 separators=( ',', ': ' ) ) )
726 # check for all intent partitions
727 topics = []
728 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700729 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800730 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700731 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -0800732 for topic in topics:
733 if topic not in ONOStopics:
734 main.log.error( "Error: " + topic +
735 " not in leaders" )
736 missing = True
737 else:
738 main.log.error( "leaders() returned None" )
739 except ( ValueError, TypeError ):
740 main.log.exception( "Error parsing leaders" )
741 main.log.error( repr( leaders ) )
742 # Check all nodes
743 if missing:
744 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700745 response = main.CLIs[ i ].leaders( jsonFormat=False )
746 main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
Jon Hall6e709752016-02-01 13:38:46 -0800747 str( response ) )
748
749 partitions = onosCli.partitions()
750 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700751 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -0800752 parsedPartitions = json.loads( partitions )
753 main.log.warn( json.dumps( parsedPartitions,
754 sort_keys=True,
755 indent=4,
756 separators=( ',', ': ' ) ) )
757 # TODO check for a leader in all paritions
758 # TODO check for consistency among nodes
759 else:
760 main.log.error( "partitions() returned None" )
761 except ( ValueError, TypeError ):
762 main.log.exception( "Error parsing partitions" )
763 main.log.error( repr( partitions ) )
764 pendingMap = onosCli.pendingMap()
765 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700766 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -0800767 parsedPending = json.loads( pendingMap )
768 main.log.warn( json.dumps( parsedPending,
769 sort_keys=True,
770 indent=4,
771 separators=( ',', ': ' ) ) )
772 # TODO check something here?
773 else:
774 main.log.error( "pendingMap() returned None" )
775 except ( ValueError, TypeError ):
776 main.log.exception( "Error parsing pending map" )
777 main.log.error( repr( pendingMap ) )
778
779 intentAddResult = bool( intentAddResult and not missingIntents and
780 installedCheck )
781 if not intentAddResult:
782 main.log.error( "Error in pushing host intents to ONOS" )
783
784 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700785 for j in range( 100 ):
Jon Hall6e709752016-02-01 13:38:46 -0800786 correct = True
787 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
788 for i in main.activeNodes:
789 onosIds = []
Jon Hallf37d44d2017-05-24 10:37:30 -0700790 ids = main.CLIs[ i ].getAllIntentsId()
Jon Hall6e709752016-02-01 13:38:46 -0800791 onosIds.append( ids )
Jon Hallf37d44d2017-05-24 10:37:30 -0700792 main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
Jon Hall6e709752016-02-01 13:38:46 -0800793 str( sorted( onosIds ) ) )
794 if sorted( ids ) != sorted( intentIds ):
795 main.log.warn( "Set of intent IDs doesn't match" )
796 correct = False
797 break
798 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700799 intents = json.loads( main.CLIs[ i ].intents() )
Jon Hall6e709752016-02-01 13:38:46 -0800800 for intent in intents:
801 if intent[ 'state' ] != "INSTALLED":
802 main.log.warn( "Intent " + intent[ 'id' ] +
803 " is " + intent[ 'state' ] )
804 correct = False
805 break
806 if correct:
807 break
808 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700809 time.sleep( 1 )
Jon Hall6e709752016-02-01 13:38:46 -0800810 if not intentStop:
811 intentStop = time.time()
812 global gossipTime
813 gossipTime = intentStop - intentStart
814 main.log.info( "It took about " + str( gossipTime ) +
815 " seconds for all intents to appear in each node" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700816 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Jon Hall6e709752016-02-01 13:38:46 -0800817 maxGossipTime = gossipPeriod * len( main.activeNodes )
818 utilities.assert_greater_equals(
819 expect=maxGossipTime, actual=gossipTime,
820 onpass="ECM anti-entropy for intents worked within " +
821 "expected time",
822 onfail="Intent ECM anti-entropy took too long. " +
823 "Expected time:{}, Actual time:{}".format( maxGossipTime,
824 gossipTime ) )
825 if gossipTime <= maxGossipTime:
826 intentAddResult = True
827
828 if not intentAddResult or "key" in pendingMap:
829 import time
830 installedCheck = True
831 main.log.info( "Sleeping 60 seconds to see if intents are found" )
832 time.sleep( 60 )
833 onosIds = onosCli.getAllIntentsId()
834 main.log.info( "Submitted intents: " + str( intentIds ) )
835 main.log.info( "Intents in ONOS: " + str( onosIds ) )
836 # Print the intent states
837 intents = onosCli.intents()
838 intentStates = []
839 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
840 count = 0
841 try:
842 for intent in json.loads( intents ):
843 # Iter through intents of a node
844 state = intent.get( 'state', None )
845 if "INSTALLED" not in state:
846 installedCheck = False
847 intentId = intent.get( 'id', None )
848 intentStates.append( ( intentId, state ) )
849 except ( ValueError, TypeError ):
850 main.log.exception( "Error parsing intents" )
851 # add submitted intents not in the store
852 tmplist = [ i for i, s in intentStates ]
853 for i in intentIds:
854 if i not in tmplist:
855 intentStates.append( ( i, " - " ) )
856 intentStates.sort()
857 for i, s in intentStates:
858 count += 1
859 main.log.info( "%-6s%-15s%-15s" %
860 ( str( count ), str( i ), str( s ) ) )
861 leaders = onosCli.leaders()
862 try:
863 missing = False
864 if leaders:
865 parsedLeaders = json.loads( leaders )
866 main.log.warn( json.dumps( parsedLeaders,
867 sort_keys=True,
868 indent=4,
869 separators=( ',', ': ' ) ) )
870 # check for all intent partitions
871 # check for election
872 topics = []
873 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700874 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800875 # FIXME: this should only be after we start the app
876 topics.append( "org.onosproject.election" )
877 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700878 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -0800879 for topic in topics:
880 if topic not in ONOStopics:
881 main.log.error( "Error: " + topic +
882 " not in leaders" )
883 missing = True
884 else:
885 main.log.error( "leaders() returned None" )
886 except ( ValueError, TypeError ):
887 main.log.exception( "Error parsing leaders" )
888 main.log.error( repr( leaders ) )
889 # Check all nodes
890 if missing:
891 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700892 node = main.CLIs[ i ]
893 response = node.leaders( jsonFormat=False )
Jon Hall6e709752016-02-01 13:38:46 -0800894 main.log.warn( str( node.name ) + " leaders output: \n" +
895 str( response ) )
896
897 partitions = onosCli.partitions()
898 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700899 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -0800900 parsedPartitions = json.loads( partitions )
901 main.log.warn( json.dumps( parsedPartitions,
902 sort_keys=True,
903 indent=4,
904 separators=( ',', ': ' ) ) )
905 # TODO check for a leader in all paritions
906 # TODO check for consistency among nodes
907 else:
908 main.log.error( "partitions() returned None" )
909 except ( ValueError, TypeError ):
910 main.log.exception( "Error parsing partitions" )
911 main.log.error( repr( partitions ) )
912 pendingMap = onosCli.pendingMap()
913 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700914 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -0800915 parsedPending = json.loads( pendingMap )
916 main.log.warn( json.dumps( parsedPending,
917 sort_keys=True,
918 indent=4,
919 separators=( ',', ': ' ) ) )
920 # TODO check something here?
921 else:
922 main.log.error( "pendingMap() returned None" )
923 except ( ValueError, TypeError ):
924 main.log.exception( "Error parsing pending map" )
925 main.log.error( repr( pendingMap ) )
926
927 def CASE4( self, main ):
928 """
929 Ping across added host intents
930 """
931 import json
932 import time
933 assert main.numCtrls, "main.numCtrls not defined"
934 assert main, "main not defined"
935 assert utilities.assert_equals, "utilities.assert_equals not defined"
936 assert main.CLIs, "main.CLIs not defined"
937 assert main.nodes, "main.nodes not defined"
938 main.case( "Verify connectivity by sending traffic across Intents" )
939 main.caseExplanation = "Ping across added host intents to check " +\
940 "functionality and check the state of " +\
941 "the intent"
Jon Hall6e709752016-02-01 13:38:46 -0800942
Jon Hallf37d44d2017-05-24 10:37:30 -0700943 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -0800944 main.step( "Check Intent state" )
945 installedCheck = False
946 loopCount = 0
947 while not installedCheck and loopCount < 40:
948 installedCheck = True
949 # Print the intent states
950 intents = onosCli.intents()
951 intentStates = []
952 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
953 count = 0
954 # Iter through intents of a node
955 try:
956 for intent in json.loads( intents ):
957 state = intent.get( 'state', None )
958 if "INSTALLED" not in state:
959 installedCheck = False
960 intentId = intent.get( 'id', None )
961 intentStates.append( ( intentId, state ) )
962 except ( ValueError, TypeError ):
963 main.log.exception( "Error parsing intents." )
964 # Print states
965 intentStates.sort()
966 for i, s in intentStates:
967 count += 1
968 main.log.info( "%-6s%-15s%-15s" %
969 ( str( count ), str( i ), str( s ) ) )
970 if not installedCheck:
971 time.sleep( 1 )
972 loopCount += 1
973 utilities.assert_equals( expect=True, actual=installedCheck,
974 onpass="Intents are all INSTALLED",
975 onfail="Intents are not all in " +
976 "INSTALLED state" )
977
Jon Hall9d2dcad2016-04-08 10:15:20 -0700978 main.step( "Ping across added host intents" )
Jon Hall9d2dcad2016-04-08 10:15:20 -0700979 PingResult = main.TRUE
980 for i in range( 8, 18 ):
981 ping = main.Mininet1.pingHost( src="h" + str( i ),
982 target="h" + str( i + 10 ) )
983 PingResult = PingResult and ping
984 if ping == main.FALSE:
985 main.log.warn( "Ping failed between h" + str( i ) +
986 " and h" + str( i + 10 ) )
987 elif ping == main.TRUE:
988 main.log.info( "Ping test passed!" )
989 # Don't set PingResult or you'd override failures
990 if PingResult == main.FALSE:
991 main.log.error(
992 "Intents have not been installed correctly, pings failed." )
993 # TODO: pretty print
994 main.log.warn( "ONOS1 intents: " )
995 try:
996 tmpIntents = onosCli.intents()
997 main.log.warn( json.dumps( json.loads( tmpIntents ),
998 sort_keys=True,
999 indent=4,
1000 separators=( ',', ': ' ) ) )
1001 except ( ValueError, TypeError ):
1002 main.log.warn( repr( tmpIntents ) )
1003 utilities.assert_equals(
1004 expect=main.TRUE,
1005 actual=PingResult,
1006 onpass="Intents have been installed correctly and pings work",
1007 onfail="Intents have not been installed correctly, pings failed." )
1008
Jon Hall6e709752016-02-01 13:38:46 -08001009 main.step( "Check leadership of topics" )
1010 leaders = onosCli.leaders()
1011 topicCheck = main.TRUE
1012 try:
1013 if leaders:
1014 parsedLeaders = json.loads( leaders )
1015 main.log.warn( json.dumps( parsedLeaders,
1016 sort_keys=True,
1017 indent=4,
1018 separators=( ',', ': ' ) ) )
1019 # check for all intent partitions
1020 # check for election
1021 # TODO: Look at Devices as topics now that it uses this system
1022 topics = []
1023 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001024 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -08001025 # FIXME: this should only be after we start the app
1026 # FIXME: topics.append( "org.onosproject.election" )
1027 # Print leaders output
1028 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001029 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -08001030 for topic in topics:
1031 if topic not in ONOStopics:
1032 main.log.error( "Error: " + topic +
1033 " not in leaders" )
1034 topicCheck = main.FALSE
1035 else:
1036 main.log.error( "leaders() returned None" )
1037 topicCheck = main.FALSE
1038 except ( ValueError, TypeError ):
1039 topicCheck = main.FALSE
1040 main.log.exception( "Error parsing leaders" )
1041 main.log.error( repr( leaders ) )
1042 # TODO: Check for a leader of these topics
1043 # Check all nodes
1044 if topicCheck:
1045 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001046 node = main.CLIs[ i ]
1047 response = node.leaders( jsonFormat=False )
Jon Hall6e709752016-02-01 13:38:46 -08001048 main.log.warn( str( node.name ) + " leaders output: \n" +
1049 str( response ) )
1050
1051 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1052 onpass="intent Partitions is in leaders",
1053 onfail="Some topics were lost " )
1054 # Print partitions
1055 partitions = onosCli.partitions()
1056 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001057 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -08001058 parsedPartitions = json.loads( partitions )
1059 main.log.warn( json.dumps( parsedPartitions,
1060 sort_keys=True,
1061 indent=4,
1062 separators=( ',', ': ' ) ) )
1063 # TODO check for a leader in all paritions
1064 # TODO check for consistency among nodes
1065 else:
1066 main.log.error( "partitions() returned None" )
1067 except ( ValueError, TypeError ):
1068 main.log.exception( "Error parsing partitions" )
1069 main.log.error( repr( partitions ) )
1070 # Print Pending Map
1071 pendingMap = onosCli.pendingMap()
1072 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001073 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -08001074 parsedPending = json.loads( pendingMap )
1075 main.log.warn( json.dumps( parsedPending,
1076 sort_keys=True,
1077 indent=4,
1078 separators=( ',', ': ' ) ) )
1079 # TODO check something here?
1080 else:
1081 main.log.error( "pendingMap() returned None" )
1082 except ( ValueError, TypeError ):
1083 main.log.exception( "Error parsing pending map" )
1084 main.log.error( repr( pendingMap ) )
1085
1086 if not installedCheck:
1087 main.log.info( "Waiting 60 seconds to see if the state of " +
1088 "intents change" )
1089 time.sleep( 60 )
1090 # Print the intent states
1091 intents = onosCli.intents()
1092 intentStates = []
1093 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1094 count = 0
1095 # Iter through intents of a node
1096 try:
1097 for intent in json.loads( intents ):
1098 state = intent.get( 'state', None )
1099 if "INSTALLED" not in state:
1100 installedCheck = False
1101 intentId = intent.get( 'id', None )
1102 intentStates.append( ( intentId, state ) )
1103 except ( ValueError, TypeError ):
1104 main.log.exception( "Error parsing intents." )
1105 intentStates.sort()
1106 for i, s in intentStates:
1107 count += 1
1108 main.log.info( "%-6s%-15s%-15s" %
1109 ( str( count ), str( i ), str( s ) ) )
1110 leaders = onosCli.leaders()
1111 try:
1112 missing = False
1113 if leaders:
1114 parsedLeaders = json.loads( leaders )
1115 main.log.warn( json.dumps( parsedLeaders,
1116 sort_keys=True,
1117 indent=4,
1118 separators=( ',', ': ' ) ) )
1119 # check for all intent partitions
1120 # check for election
1121 topics = []
1122 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001123 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -08001124 # FIXME: this should only be after we start the app
1125 topics.append( "org.onosproject.election" )
1126 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001127 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall6e709752016-02-01 13:38:46 -08001128 for topic in topics:
1129 if topic not in ONOStopics:
1130 main.log.error( "Error: " + topic +
1131 " not in leaders" )
1132 missing = True
1133 else:
1134 main.log.error( "leaders() returned None" )
1135 except ( ValueError, TypeError ):
1136 main.log.exception( "Error parsing leaders" )
1137 main.log.error( repr( leaders ) )
1138 if missing:
1139 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001140 node = main.CLIs[ i ]
1141 response = node.leaders( jsonFormat=False )
Jon Hall6e709752016-02-01 13:38:46 -08001142 main.log.warn( str( node.name ) + " leaders output: \n" +
1143 str( response ) )
1144
1145 partitions = onosCli.partitions()
1146 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001147 if partitions:
Jon Hall6e709752016-02-01 13:38:46 -08001148 parsedPartitions = json.loads( partitions )
1149 main.log.warn( json.dumps( parsedPartitions,
1150 sort_keys=True,
1151 indent=4,
1152 separators=( ',', ': ' ) ) )
1153 # TODO check for a leader in all paritions
1154 # TODO check for consistency among nodes
1155 else:
1156 main.log.error( "partitions() returned None" )
1157 except ( ValueError, TypeError ):
1158 main.log.exception( "Error parsing partitions" )
1159 main.log.error( repr( partitions ) )
1160 pendingMap = onosCli.pendingMap()
1161 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001162 if pendingMap:
Jon Hall6e709752016-02-01 13:38:46 -08001163 parsedPending = json.loads( pendingMap )
1164 main.log.warn( json.dumps( parsedPending,
1165 sort_keys=True,
1166 indent=4,
1167 separators=( ',', ': ' ) ) )
1168 # TODO check something here?
1169 else:
1170 main.log.error( "pendingMap() returned None" )
1171 except ( ValueError, TypeError ):
1172 main.log.exception( "Error parsing pending map" )
1173 main.log.error( repr( pendingMap ) )
1174 # Print flowrules
Jon Hallf37d44d2017-05-24 10:37:30 -07001175 node = main.activeNodes[ 0 ]
1176 main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
Jon Hall6e709752016-02-01 13:38:46 -08001177 main.step( "Wait a minute then ping again" )
1178 # the wait is above
1179 PingResult = main.TRUE
1180 for i in range( 8, 18 ):
1181 ping = main.Mininet1.pingHost( src="h" + str( i ),
1182 target="h" + str( i + 10 ) )
1183 PingResult = PingResult and ping
1184 if ping == main.FALSE:
1185 main.log.warn( "Ping failed between h" + str( i ) +
1186 " and h" + str( i + 10 ) )
1187 elif ping == main.TRUE:
1188 main.log.info( "Ping test passed!" )
1189 # Don't set PingResult or you'd override failures
1190 if PingResult == main.FALSE:
1191 main.log.error(
1192 "Intents have not been installed correctly, pings failed." )
1193 # TODO: pretty print
1194 main.log.warn( "ONOS1 intents: " )
1195 try:
1196 tmpIntents = onosCli.intents()
1197 main.log.warn( json.dumps( json.loads( tmpIntents ),
1198 sort_keys=True,
1199 indent=4,
1200 separators=( ',', ': ' ) ) )
1201 except ( ValueError, TypeError ):
1202 main.log.warn( repr( tmpIntents ) )
1203 utilities.assert_equals(
1204 expect=main.TRUE,
1205 actual=PingResult,
1206 onpass="Intents have been installed correctly and pings work",
1207 onfail="Intents have not been installed correctly, pings failed." )
1208
1209 def CASE5( self, main ):
1210 """
1211 Reading state of ONOS
1212 """
1213 import json
1214 import time
1215 assert main.numCtrls, "main.numCtrls not defined"
1216 assert main, "main not defined"
1217 assert utilities.assert_equals, "utilities.assert_equals not defined"
1218 assert main.CLIs, "main.CLIs not defined"
1219 assert main.nodes, "main.nodes not defined"
1220
1221 main.case( "Setting up and gathering data for current state" )
1222 # The general idea for this test case is to pull the state of
1223 # ( intents,flows, topology,... ) from each ONOS node
1224 # We can then compare them with each other and also with past states
1225
1226 main.step( "Check that each switch has a master" )
1227 global mastershipState
1228 mastershipState = '[]'
1229
1230 # Assert that each device has a master
1231 rolesNotNull = main.TRUE
1232 threads = []
1233 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001234 t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
Jon Hall6e709752016-02-01 13:38:46 -08001235 name="rolesNotNull-" + str( i ),
1236 args=[] )
1237 threads.append( t )
1238 t.start()
1239
1240 for t in threads:
1241 t.join()
1242 rolesNotNull = rolesNotNull and t.result
1243 utilities.assert_equals(
1244 expect=main.TRUE,
1245 actual=rolesNotNull,
1246 onpass="Each device has a master",
1247 onfail="Some devices don't have a master assigned" )
1248
1249 main.step( "Get the Mastership of each switch from each controller" )
1250 ONOSMastership = []
1251 mastershipCheck = main.FALSE
1252 consistentMastership = True
1253 rolesResults = True
1254 threads = []
1255 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001256 t = main.Thread( target=main.CLIs[ i ].roles,
Jon Hall6e709752016-02-01 13:38:46 -08001257 name="roles-" + str( i ),
1258 args=[] )
1259 threads.append( t )
1260 t.start()
1261
1262 for t in threads:
1263 t.join()
1264 ONOSMastership.append( t.result )
1265
1266 for i in range( len( ONOSMastership ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001267 node = str( main.activeNodes[ i ] + 1 )
1268 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hall6e709752016-02-01 13:38:46 -08001269 main.log.error( "Error in getting ONOS" + node + " roles" )
1270 main.log.warn( "ONOS" + node + " mastership response: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001271 repr( ONOSMastership[ i ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001272 rolesResults = False
1273 utilities.assert_equals(
1274 expect=True,
1275 actual=rolesResults,
1276 onpass="No error in reading roles output",
1277 onfail="Error in reading roles from ONOS" )
1278
1279 main.step( "Check for consistency in roles from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001280 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
Jon Hall6e709752016-02-01 13:38:46 -08001281 main.log.info(
1282 "Switch roles are consistent across all ONOS nodes" )
1283 else:
1284 consistentMastership = False
1285 utilities.assert_equals(
1286 expect=True,
1287 actual=consistentMastership,
1288 onpass="Switch roles are consistent across all ONOS nodes",
1289 onfail="ONOS nodes have different views of switch roles" )
1290
1291 if rolesResults and not consistentMastership:
1292 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001293 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001294 try:
1295 main.log.warn(
1296 "ONOS" + node + " roles: ",
1297 json.dumps(
1298 json.loads( ONOSMastership[ i ] ),
1299 sort_keys=True,
1300 indent=4,
1301 separators=( ',', ': ' ) ) )
1302 except ( ValueError, TypeError ):
1303 main.log.warn( repr( ONOSMastership[ i ] ) )
1304 elif rolesResults and consistentMastership:
1305 mastershipCheck = main.TRUE
1306 mastershipState = ONOSMastership[ 0 ]
1307
1308 main.step( "Get the intents from each controller" )
1309 global intentState
1310 intentState = []
1311 ONOSIntents = []
1312 intentCheck = main.FALSE
1313 consistentIntents = True
1314 intentsResults = True
1315 threads = []
1316 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001317 t = main.Thread( target=main.CLIs[ i ].intents,
Jon Hall6e709752016-02-01 13:38:46 -08001318 name="intents-" + str( i ),
1319 args=[],
1320 kwargs={ 'jsonFormat': True } )
1321 threads.append( t )
1322 t.start()
1323
1324 for t in threads:
1325 t.join()
1326 ONOSIntents.append( t.result )
1327
1328 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001329 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001330 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1331 main.log.error( "Error in getting ONOS" + node + " intents" )
1332 main.log.warn( "ONOS" + node + " intents response: " +
1333 repr( ONOSIntents[ i ] ) )
1334 intentsResults = False
1335 utilities.assert_equals(
1336 expect=True,
1337 actual=intentsResults,
1338 onpass="No error in reading intents output",
1339 onfail="Error in reading intents from ONOS" )
1340
1341 main.step( "Check for consistency in Intents from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001342 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
Jon Hall6e709752016-02-01 13:38:46 -08001343 main.log.info( "Intents are consistent across all ONOS " +
1344 "nodes" )
1345 else:
1346 consistentIntents = False
1347 main.log.error( "Intents not consistent" )
1348 utilities.assert_equals(
1349 expect=True,
1350 actual=consistentIntents,
1351 onpass="Intents are consistent across all ONOS nodes",
1352 onfail="ONOS nodes have different views of intents" )
1353
1354 if intentsResults:
1355 # Try to make it easy to figure out what is happening
1356 #
1357 # Intent ONOS1 ONOS2 ...
1358 # 0x01 INSTALLED INSTALLING
1359 # ... ... ...
1360 # ... ... ...
1361 title = " Id"
1362 for n in main.activeNodes:
1363 title += " " * 10 + "ONOS" + str( n + 1 )
1364 main.log.warn( title )
1365 # get all intent keys in the cluster
1366 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001367 try:
1368 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001369 for nodeStr in ONOSIntents:
1370 node = json.loads( nodeStr )
1371 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001372 keys.append( intent.get( 'id' ) )
1373 keys = set( keys )
1374 # For each intent key, print the state on each node
1375 for key in keys:
1376 row = "%-13s" % key
1377 for nodeStr in ONOSIntents:
1378 node = json.loads( nodeStr )
1379 for intent in node:
1380 if intent.get( 'id', "Error" ) == key:
1381 row += "%-15s" % intent.get( 'state' )
1382 main.log.warn( row )
1383 # End of intent state table
1384 except ValueError as e:
1385 main.log.exception( e )
1386 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001387
1388 if intentsResults and not consistentIntents:
1389 # print the json objects
Jon Hallf37d44d2017-05-24 10:37:30 -07001390 n = str( main.activeNodes[ -1 ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001391 main.log.debug( "ONOS" + n + " intents: " )
1392 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1393 sort_keys=True,
1394 indent=4,
1395 separators=( ',', ': ' ) ) )
1396 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001397 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001398 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1399 main.log.debug( "ONOS" + node + " intents: " )
Jon Hallf37d44d2017-05-24 10:37:30 -07001400 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
Jon Hall6e709752016-02-01 13:38:46 -08001401 sort_keys=True,
1402 indent=4,
1403 separators=( ',', ': ' ) ) )
1404 else:
1405 main.log.debug( "ONOS" + node + " intents match ONOS" +
1406 n + " intents" )
1407 elif intentsResults and consistentIntents:
1408 intentCheck = main.TRUE
1409 intentState = ONOSIntents[ 0 ]
1410
1411 main.step( "Get the flows from each controller" )
1412 global flowState
1413 flowState = []
1414 ONOSFlows = []
1415 ONOSFlowsJson = []
1416 flowCheck = main.FALSE
1417 consistentFlows = True
1418 flowsResults = True
1419 threads = []
1420 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001421 t = main.Thread( target=main.CLIs[ i ].flows,
Jon Hall6e709752016-02-01 13:38:46 -08001422 name="flows-" + str( i ),
1423 args=[],
1424 kwargs={ 'jsonFormat': True } )
1425 threads.append( t )
1426 t.start()
1427
1428 # NOTE: Flows command can take some time to run
Jon Hallf37d44d2017-05-24 10:37:30 -07001429 time.sleep( 30 )
Jon Hall6e709752016-02-01 13:38:46 -08001430 for t in threads:
1431 t.join()
1432 result = t.result
1433 ONOSFlows.append( result )
1434
1435 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001436 num = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001437 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1438 main.log.error( "Error in getting ONOS" + num + " flows" )
1439 main.log.warn( "ONOS" + num + " flows response: " +
1440 repr( ONOSFlows[ i ] ) )
1441 flowsResults = False
1442 ONOSFlowsJson.append( None )
1443 else:
1444 try:
1445 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1446 except ( ValueError, TypeError ):
1447 # FIXME: change this to log.error?
1448 main.log.exception( "Error in parsing ONOS" + num +
1449 " response as json." )
1450 main.log.error( repr( ONOSFlows[ i ] ) )
1451 ONOSFlowsJson.append( None )
1452 flowsResults = False
1453 utilities.assert_equals(
1454 expect=True,
1455 actual=flowsResults,
1456 onpass="No error in reading flows output",
1457 onfail="Error in reading flows from ONOS" )
1458
1459 main.step( "Check for consistency in Flows from each controller" )
1460 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1461 if all( tmp ):
1462 main.log.info( "Flow count is consistent across all ONOS nodes" )
1463 else:
1464 consistentFlows = False
1465 utilities.assert_equals(
1466 expect=True,
1467 actual=consistentFlows,
1468 onpass="The flow count is consistent across all ONOS nodes",
1469 onfail="ONOS nodes have different flow counts" )
1470
1471 if flowsResults and not consistentFlows:
1472 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001473 node = str( main.activeNodes[ i ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001474 try:
1475 main.log.warn(
1476 "ONOS" + node + " flows: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001477 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
Jon Hall6e709752016-02-01 13:38:46 -08001478 indent=4, separators=( ',', ': ' ) ) )
1479 except ( ValueError, TypeError ):
1480 main.log.warn( "ONOS" + node + " flows: " +
1481 repr( ONOSFlows[ i ] ) )
1482 elif flowsResults and consistentFlows:
1483 flowCheck = main.TRUE
1484 flowState = ONOSFlows[ 0 ]
1485
1486 main.step( "Get the OF Table entries" )
1487 global flows
1488 flows = []
1489 for i in range( 1, 29 ):
1490 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1491 if flowCheck == main.FALSE:
1492 for table in flows:
1493 main.log.warn( table )
1494 # TODO: Compare switch flow tables with ONOS flow tables
1495
1496 main.step( "Start continuous pings" )
1497 main.Mininet2.pingLong(
1498 src=main.params[ 'PING' ][ 'source1' ],
1499 target=main.params[ 'PING' ][ 'target1' ],
1500 pingTime=500 )
1501 main.Mininet2.pingLong(
1502 src=main.params[ 'PING' ][ 'source2' ],
1503 target=main.params[ 'PING' ][ 'target2' ],
1504 pingTime=500 )
1505 main.Mininet2.pingLong(
1506 src=main.params[ 'PING' ][ 'source3' ],
1507 target=main.params[ 'PING' ][ 'target3' ],
1508 pingTime=500 )
1509 main.Mininet2.pingLong(
1510 src=main.params[ 'PING' ][ 'source4' ],
1511 target=main.params[ 'PING' ][ 'target4' ],
1512 pingTime=500 )
1513 main.Mininet2.pingLong(
1514 src=main.params[ 'PING' ][ 'source5' ],
1515 target=main.params[ 'PING' ][ 'target5' ],
1516 pingTime=500 )
1517 main.Mininet2.pingLong(
1518 src=main.params[ 'PING' ][ 'source6' ],
1519 target=main.params[ 'PING' ][ 'target6' ],
1520 pingTime=500 )
1521 main.Mininet2.pingLong(
1522 src=main.params[ 'PING' ][ 'source7' ],
1523 target=main.params[ 'PING' ][ 'target7' ],
1524 pingTime=500 )
1525 main.Mininet2.pingLong(
1526 src=main.params[ 'PING' ][ 'source8' ],
1527 target=main.params[ 'PING' ][ 'target8' ],
1528 pingTime=500 )
1529 main.Mininet2.pingLong(
1530 src=main.params[ 'PING' ][ 'source9' ],
1531 target=main.params[ 'PING' ][ 'target9' ],
1532 pingTime=500 )
1533 main.Mininet2.pingLong(
1534 src=main.params[ 'PING' ][ 'source10' ],
1535 target=main.params[ 'PING' ][ 'target10' ],
1536 pingTime=500 )
1537
1538 main.step( "Collecting topology information from ONOS" )
1539 devices = []
1540 threads = []
1541 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001542 t = main.Thread( target=main.CLIs[ i ].devices,
Jon Hall6e709752016-02-01 13:38:46 -08001543 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001544 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001545 threads.append( t )
1546 t.start()
1547
1548 for t in threads:
1549 t.join()
1550 devices.append( t.result )
1551 hosts = []
1552 threads = []
1553 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001554 t = main.Thread( target=main.CLIs[ i ].hosts,
Jon Hall6e709752016-02-01 13:38:46 -08001555 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001556 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001557 threads.append( t )
1558 t.start()
1559
1560 for t in threads:
1561 t.join()
1562 try:
1563 hosts.append( json.loads( t.result ) )
1564 except ( ValueError, TypeError ):
1565 # FIXME: better handling of this, print which node
1566 # Maybe use thread name?
1567 main.log.exception( "Error parsing json output of hosts" )
1568 main.log.warn( repr( t.result ) )
1569 hosts.append( None )
1570
1571 ports = []
1572 threads = []
1573 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001574 t = main.Thread( target=main.CLIs[ i ].ports,
Jon Hall6e709752016-02-01 13:38:46 -08001575 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001576 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001577 threads.append( t )
1578 t.start()
1579
1580 for t in threads:
1581 t.join()
1582 ports.append( t.result )
1583 links = []
1584 threads = []
1585 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001586 t = main.Thread( target=main.CLIs[ i ].links,
Jon Hall6e709752016-02-01 13:38:46 -08001587 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001588 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001589 threads.append( t )
1590 t.start()
1591
1592 for t in threads:
1593 t.join()
1594 links.append( t.result )
1595 clusters = []
1596 threads = []
1597 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001598 t = main.Thread( target=main.CLIs[ i ].clusters,
Jon Hall6e709752016-02-01 13:38:46 -08001599 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001600 args=[] )
Jon Hall6e709752016-02-01 13:38:46 -08001601 threads.append( t )
1602 t.start()
1603
1604 for t in threads:
1605 t.join()
1606 clusters.append( t.result )
1607 # Compare json objects for hosts and dataplane clusters
1608
1609 # hosts
1610 main.step( "Host view is consistent across ONOS nodes" )
1611 consistentHostsResult = main.TRUE
1612 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001613 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001614 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1615 if hosts[ controller ] == hosts[ 0 ]:
1616 continue
1617 else: # hosts not consistent
1618 main.log.error( "hosts from ONOS" +
1619 controllerStr +
1620 " is inconsistent with ONOS1" )
1621 main.log.warn( repr( hosts[ controller ] ) )
1622 consistentHostsResult = main.FALSE
1623
1624 else:
1625 main.log.error( "Error in getting ONOS hosts from ONOS" +
1626 controllerStr )
1627 consistentHostsResult = main.FALSE
1628 main.log.warn( "ONOS" + controllerStr +
1629 " hosts response: " +
1630 repr( hosts[ controller ] ) )
1631 utilities.assert_equals(
1632 expect=main.TRUE,
1633 actual=consistentHostsResult,
1634 onpass="Hosts view is consistent across all ONOS nodes",
1635 onfail="ONOS nodes have different views of hosts" )
1636
1637 main.step( "Each host has an IP address" )
1638 ipResult = main.TRUE
1639 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001640 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001641 if hosts[ controller ]:
1642 for host in hosts[ controller ]:
Jon Hallf37d44d2017-05-24 10:37:30 -07001643 if not host.get( 'ipAddresses', [] ):
Jon Hall6e709752016-02-01 13:38:46 -08001644 main.log.error( "Error with host ips on controller" +
1645 controllerStr + ": " + str( host ) )
1646 ipResult = main.FALSE
1647 utilities.assert_equals(
1648 expect=main.TRUE,
1649 actual=ipResult,
1650 onpass="The ips of the hosts aren't empty",
1651 onfail="The ip of at least one host is missing" )
1652
1653 # Strongly connected clusters of devices
1654 main.step( "Cluster view is consistent across ONOS nodes" )
1655 consistentClustersResult = main.TRUE
1656 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001657 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001658 if "Error" not in clusters[ controller ]:
1659 if clusters[ controller ] == clusters[ 0 ]:
1660 continue
1661 else: # clusters not consistent
1662 main.log.error( "clusters from ONOS" + controllerStr +
1663 " is inconsistent with ONOS1" )
1664 consistentClustersResult = main.FALSE
1665
1666 else:
1667 main.log.error( "Error in getting dataplane clusters " +
1668 "from ONOS" + controllerStr )
1669 consistentClustersResult = main.FALSE
1670 main.log.warn( "ONOS" + controllerStr +
1671 " clusters response: " +
1672 repr( clusters[ controller ] ) )
1673 utilities.assert_equals(
1674 expect=main.TRUE,
1675 actual=consistentClustersResult,
1676 onpass="Clusters view is consistent across all ONOS nodes",
1677 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001678 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001679 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001680
Jon Hall6e709752016-02-01 13:38:46 -08001681 # there should always only be one cluster
1682 main.step( "Cluster view correct across ONOS nodes" )
1683 try:
1684 numClusters = len( json.loads( clusters[ 0 ] ) )
1685 except ( ValueError, TypeError ):
1686 main.log.exception( "Error parsing clusters[0]: " +
1687 repr( clusters[ 0 ] ) )
1688 numClusters = "ERROR"
1689 clusterResults = main.FALSE
1690 if numClusters == 1:
1691 clusterResults = main.TRUE
1692 utilities.assert_equals(
1693 expect=1,
1694 actual=numClusters,
1695 onpass="ONOS shows 1 SCC",
1696 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1697
1698 main.step( "Comparing ONOS topology to MN" )
1699 devicesResults = main.TRUE
1700 linksResults = main.TRUE
1701 hostsResults = main.TRUE
1702 mnSwitches = main.Mininet1.getSwitches()
1703 mnLinks = main.Mininet1.getLinks()
1704 mnHosts = main.Mininet1.getHosts()
1705 for controller in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001706 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08001707 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07001708 "Error" not in devices[ controller ] and\
1709 "Error" not in ports[ controller ]:
1710 currentDevicesResult = main.Mininet1.compareSwitches(
1711 mnSwitches,
1712 json.loads( devices[ controller ] ),
1713 json.loads( ports[ controller ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001714 else:
1715 currentDevicesResult = main.FALSE
1716 utilities.assert_equals( expect=main.TRUE,
1717 actual=currentDevicesResult,
1718 onpass="ONOS" + controllerStr +
1719 " Switches view is correct",
1720 onfail="ONOS" + controllerStr +
1721 " Switches view is incorrect" )
1722 if links[ controller ] and "Error" not in links[ controller ]:
1723 currentLinksResult = main.Mininet1.compareLinks(
1724 mnSwitches, mnLinks,
1725 json.loads( links[ controller ] ) )
1726 else:
1727 currentLinksResult = main.FALSE
1728 utilities.assert_equals( expect=main.TRUE,
1729 actual=currentLinksResult,
1730 onpass="ONOS" + controllerStr +
1731 " links view is correct",
1732 onfail="ONOS" + controllerStr +
1733 " links view is incorrect" )
1734
1735 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1736 currentHostsResult = main.Mininet1.compareHosts(
1737 mnHosts,
1738 hosts[ controller ] )
1739 else:
1740 currentHostsResult = main.FALSE
1741 utilities.assert_equals( expect=main.TRUE,
1742 actual=currentHostsResult,
1743 onpass="ONOS" + controllerStr +
1744 " hosts exist in Mininet",
1745 onfail="ONOS" + controllerStr +
1746 " hosts don't match Mininet" )
1747
1748 devicesResults = devicesResults and currentDevicesResult
1749 linksResults = linksResults and currentLinksResult
1750 hostsResults = hostsResults and currentHostsResult
1751
1752 main.step( "Device information is correct" )
1753 utilities.assert_equals(
1754 expect=main.TRUE,
1755 actual=devicesResults,
1756 onpass="Device information is correct",
1757 onfail="Device information is incorrect" )
1758
1759 main.step( "Links are correct" )
1760 utilities.assert_equals(
1761 expect=main.TRUE,
1762 actual=linksResults,
1763 onpass="Link are correct",
1764 onfail="Links are incorrect" )
1765
1766 main.step( "Hosts are correct" )
1767 utilities.assert_equals(
1768 expect=main.TRUE,
1769 actual=hostsResults,
1770 onpass="Hosts are correct",
1771 onfail="Hosts are incorrect" )
1772
1773 def CASE61( self, main ):
1774 """
1775 The Failure case.
1776 """
1777 import math
1778 assert main.numCtrls, "main.numCtrls not defined"
1779 assert main, "main not defined"
1780 assert utilities.assert_equals, "utilities.assert_equals not defined"
1781 assert main.CLIs, "main.CLIs not defined"
1782 assert main.nodes, "main.nodes not defined"
1783 main.case( "Partition ONOS nodes into two distinct partitions" )
1784
1785 main.step( "Checking ONOS Logs for errors" )
1786 for node in main.nodes:
1787 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1788 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1789
Jon Hallf37d44d2017-05-24 10:37:30 -07001790 main.log.debug( main.CLIs[ 0 ].roles( jsonFormat=False ) )
Jon Halld2871c22016-07-26 11:01:14 -07001791
Jon Hall6e709752016-02-01 13:38:46 -08001792 n = len( main.nodes ) # Number of nodes
1793 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1794 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1795 if n > 3:
1796 main.partition.append( p - 1 )
1797 # NOTE: This only works for cluster sizes of 3,5, or 7.
1798
1799 main.step( "Partitioning ONOS nodes" )
1800 nodeList = [ str( i + 1 ) for i in main.partition ]
1801 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1802 partitionResults = main.TRUE
1803 for i in range( 0, n ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001804 this = main.nodes[ i ]
Jon Hall6e709752016-02-01 13:38:46 -08001805 if i not in main.partition:
1806 for j in main.partition:
Jon Hallf37d44d2017-05-24 10:37:30 -07001807 foe = main.nodes[ j ]
Jon Hall6e709752016-02-01 13:38:46 -08001808 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1809 #CMD HERE
1810 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1811 this.handle.sendline( cmdStr )
1812 this.handle.expect( "\$" )
1813 main.log.debug( this.handle.before )
1814 else:
1815 for j in range( 0, n ):
1816 if j not in main.partition:
Jon Hallf37d44d2017-05-24 10:37:30 -07001817 foe = main.nodes[ j ]
Jon Hall6e709752016-02-01 13:38:46 -08001818 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1819 #CMD HERE
1820 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1821 this.handle.sendline( cmdStr )
1822 this.handle.expect( "\$" )
1823 main.log.debug( this.handle.before )
1824 main.activeNodes.remove( i )
1825 # NOTE: When dynamic clustering is finished, we need to start checking
1826 # main.partion nodes still work when partitioned
1827 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1828 onpass="Firewall rules set successfully",
1829 onfail="Error setting firewall rules" )
1830
Jon Hall6509dbf2016-06-21 17:01:17 -07001831 main.step( "Sleeping 60 seconds" )
Jon Hall6e709752016-02-01 13:38:46 -08001832 time.sleep( 60 )
1833
1834 def CASE62( self, main ):
1835 """
1836 Healing Partition
1837 """
1838 import time
1839 assert main.numCtrls, "main.numCtrls not defined"
1840 assert main, "main not defined"
1841 assert utilities.assert_equals, "utilities.assert_equals not defined"
1842 assert main.CLIs, "main.CLIs not defined"
1843 assert main.nodes, "main.nodes not defined"
1844 assert main.partition, "main.partition not defined"
1845 main.case( "Healing Partition" )
1846
1847 main.step( "Deleteing firewall rules" )
1848 healResults = main.TRUE
1849 for node in main.nodes:
1850 cmdStr = "sudo iptables -F"
1851 node.handle.sendline( cmdStr )
1852 node.handle.expect( "\$" )
1853 main.log.debug( node.handle.before )
1854 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1855 onpass="Firewall rules removed",
1856 onfail="Error removing firewall rules" )
1857
1858 for node in main.partition:
1859 main.activeNodes.append( node )
1860 main.activeNodes.sort()
1861 try:
1862 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1863 "List of active nodes has duplicates, this likely indicates something was run out of order"
1864 except AssertionError:
1865 main.log.exception( "" )
1866 main.cleanup()
1867 main.exit()
1868
Jon Halld2871c22016-07-26 11:01:14 -07001869 main.step( "Checking ONOS nodes" )
1870 nodeResults = utilities.retry( main.HA.nodesCheck,
1871 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07001872 args=[ main.activeNodes ],
Jon Halld2871c22016-07-26 11:01:14 -07001873 sleep=15,
1874 attempts=5 )
1875
1876 utilities.assert_equals( expect=True, actual=nodeResults,
1877 onpass="Nodes check successful",
1878 onfail="Nodes check NOT successful" )
1879
1880 if not nodeResults:
1881 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001882 cli = main.CLIs[ i ]
Jon Halld2871c22016-07-26 11:01:14 -07001883 main.log.debug( "{} components not ACTIVE: \n{}".format(
1884 cli.name,
1885 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1886 main.log.error( "Failed to start ONOS, stopping test" )
1887 main.cleanup()
1888 main.exit()
1889
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Run after CASE61 has partitioned the cluster. Only the nodes in
        main.activeNodes are queried. Checks performed:
            - every switch still has a master
            - device roles are readable and consistent across nodes
            - intents are readable, consistent across nodes, and
              unchanged from before the failure
            - switch flow tables are unchanged from before the failure
            - leadership election still works and the leader is not a
              partitioned node
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is set by CASE61; default to an empty list so
        # this case can also run when no partition was induced.
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel; AND the results together.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        # NOTE(review): mastershipCheck is assigned but never used in
        #               this case.
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # An empty/None response or an "Error" substring marks a failed read.
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All responses must be byte-identical to the first node's view.
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's (pretty-printed) roles output.
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # sorted() compares the raw JSON strings character-wise, so this
        # tolerates re-ordering of intents but not differing content.
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states, e.g. {'INSTALLED': 25}.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # intentState is a global presumably saved by an earlier case;
        # the NameError guard below handles the case where it was not.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not identical strings: fall back to a
                # per-intent JSON membership comparison.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before failure",
                onfail="The Intents changed during failure" )
            intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # 'flows' is a global holding the pre-failure flow tables,
        # presumably saved by an earlier case (unlike intentState it is
        # not guarded, so this step fails if that case did not run).
        # The 28-switch range matches the obelisk topology used here.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the background ping started earlier; the detailed
        # ping-loss check below is currently disabled.
        main.Mininet2.pingLongKill()
        """
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        """
        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the partitioned nodes; none of them may be the leader.
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[ i ].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[ i ]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # Every active node must agree on a single leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2211
2212 def CASE8( self, main ):
2213 """
2214 Compare topo
2215 """
2216 import json
2217 import time
2218 assert main.numCtrls, "main.numCtrls not defined"
2219 assert main, "main not defined"
2220 assert utilities.assert_equals, "utilities.assert_equals not defined"
2221 assert main.CLIs, "main.CLIs not defined"
2222 assert main.nodes, "main.nodes not defined"
2223
2224 main.case( "Compare ONOS Topology view to Mininet topology" )
2225 main.caseExplanation = "Compare topology objects between Mininet" +\
2226 " and ONOS"
2227 topoResult = main.FALSE
2228 topoFailMsg = "ONOS topology don't match Mininet"
2229 elapsed = 0
2230 count = 0
2231 main.step( "Comparing ONOS topology to MN topology" )
2232 startTime = time.time()
2233 # Give time for Gossip to work
2234 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2235 devicesResults = main.TRUE
2236 linksResults = main.TRUE
2237 hostsResults = main.TRUE
2238 hostAttachmentResults = True
2239 count += 1
2240 cliStart = time.time()
2241 devices = []
2242 threads = []
2243 for i in main.activeNodes:
2244 t = main.Thread( target=utilities.retry,
2245 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002246 args=[ main.CLIs[ i ].devices, [ None ] ],
2247 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002248 'randomTime': True } )
2249 threads.append( t )
2250 t.start()
2251
2252 for t in threads:
2253 t.join()
2254 devices.append( t.result )
2255 hosts = []
2256 ipResult = main.TRUE
2257 threads = []
2258 for i in main.activeNodes:
2259 t = main.Thread( target=utilities.retry,
2260 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002261 args=[ main.CLIs[ i ].hosts, [ None ] ],
2262 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002263 'randomTime': True } )
2264 threads.append( t )
2265 t.start()
2266
2267 for t in threads:
2268 t.join()
2269 try:
2270 hosts.append( json.loads( t.result ) )
2271 except ( ValueError, TypeError ):
2272 main.log.exception( "Error parsing hosts results" )
2273 main.log.error( repr( t.result ) )
2274 hosts.append( None )
2275 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002276 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002277 if hosts[ controller ]:
2278 for host in hosts[ controller ]:
2279 if host is None or host.get( 'ipAddresses', [] ) == []:
2280 main.log.error(
2281 "Error with host ipAddresses on controller" +
2282 controllerStr + ": " + str( host ) )
2283 ipResult = main.FALSE
2284 ports = []
2285 threads = []
2286 for i in main.activeNodes:
2287 t = main.Thread( target=utilities.retry,
2288 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002289 args=[ main.CLIs[ i ].ports, [ None ] ],
2290 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002291 'randomTime': True } )
2292 threads.append( t )
2293 t.start()
2294
2295 for t in threads:
2296 t.join()
2297 ports.append( t.result )
2298 links = []
2299 threads = []
2300 for i in main.activeNodes:
2301 t = main.Thread( target=utilities.retry,
2302 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002303 args=[ main.CLIs[ i ].links, [ None ] ],
2304 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002305 'randomTime': True } )
2306 threads.append( t )
2307 t.start()
2308
2309 for t in threads:
2310 t.join()
2311 links.append( t.result )
2312 clusters = []
2313 threads = []
2314 for i in main.activeNodes:
2315 t = main.Thread( target=utilities.retry,
2316 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002317 args=[ main.CLIs[ i ].clusters, [ None ] ],
2318 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall6e709752016-02-01 13:38:46 -08002319 'randomTime': True } )
2320 threads.append( t )
2321 t.start()
2322
2323 for t in threads:
2324 t.join()
2325 clusters.append( t.result )
2326
2327 elapsed = time.time() - startTime
2328 cliTime = time.time() - cliStart
2329 print "Elapsed time: " + str( elapsed )
2330 print "CLI time: " + str( cliTime )
2331
2332 if all( e is None for e in devices ) and\
2333 all( e is None for e in hosts ) and\
2334 all( e is None for e in ports ) and\
2335 all( e is None for e in links ) and\
2336 all( e is None for e in clusters ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002337 topoFailMsg = "Could not get topology from ONOS"
2338 main.log.error( topoFailMsg )
2339 continue # Try again, No use trying to compare
Jon Hall6e709752016-02-01 13:38:46 -08002340
2341 mnSwitches = main.Mininet1.getSwitches()
2342 mnLinks = main.Mininet1.getLinks()
2343 mnHosts = main.Mininet1.getHosts()
2344 for controller in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002345 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002346 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07002347 "Error" not in devices[ controller ] and\
2348 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08002349
2350 try:
2351 currentDevicesResult = main.Mininet1.compareSwitches(
2352 mnSwitches,
2353 json.loads( devices[ controller ] ),
2354 json.loads( ports[ controller ] ) )
2355 except ( TypeError, ValueError ) as e:
2356 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2357 devices[ controller ], ports[ controller ] ) )
2358 else:
2359 currentDevicesResult = main.FALSE
2360 utilities.assert_equals( expect=main.TRUE,
2361 actual=currentDevicesResult,
2362 onpass="ONOS" + controllerStr +
2363 " Switches view is correct",
2364 onfail="ONOS" + controllerStr +
2365 " Switches view is incorrect" )
2366
2367 if links[ controller ] and "Error" not in links[ controller ]:
2368 currentLinksResult = main.Mininet1.compareLinks(
2369 mnSwitches, mnLinks,
2370 json.loads( links[ controller ] ) )
2371 else:
2372 currentLinksResult = main.FALSE
2373 utilities.assert_equals( expect=main.TRUE,
2374 actual=currentLinksResult,
2375 onpass="ONOS" + controllerStr +
2376 " links view is correct",
2377 onfail="ONOS" + controllerStr +
2378 " links view is incorrect" )
2379 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2380 currentHostsResult = main.Mininet1.compareHosts(
2381 mnHosts,
2382 hosts[ controller ] )
2383 elif hosts[ controller ] == []:
2384 currentHostsResult = main.TRUE
2385 else:
2386 currentHostsResult = main.FALSE
2387 utilities.assert_equals( expect=main.TRUE,
2388 actual=currentHostsResult,
2389 onpass="ONOS" + controllerStr +
2390 " hosts exist in Mininet",
2391 onfail="ONOS" + controllerStr +
2392 " hosts don't match Mininet" )
2393 # CHECKING HOST ATTACHMENT POINTS
2394 hostAttachment = True
2395 zeroHosts = False
2396 # FIXME: topo-HA/obelisk specific mappings:
2397 # key is mac and value is dpid
2398 mappings = {}
2399 for i in range( 1, 29 ): # hosts 1 through 28
2400 # set up correct variables:
Jon Hallf37d44d2017-05-24 10:37:30 -07002401 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
Jon Hall6e709752016-02-01 13:38:46 -08002402 if i == 1:
Jon Hallf37d44d2017-05-24 10:37:30 -07002403 deviceId = "1000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002404 elif i == 2:
Jon Hallf37d44d2017-05-24 10:37:30 -07002405 deviceId = "2000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002406 elif i == 3:
Jon Hallf37d44d2017-05-24 10:37:30 -07002407 deviceId = "3000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002408 elif i == 4:
Jon Hallf37d44d2017-05-24 10:37:30 -07002409 deviceId = "3004".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002410 elif i == 5:
Jon Hallf37d44d2017-05-24 10:37:30 -07002411 deviceId = "5000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002412 elif i == 6:
Jon Hallf37d44d2017-05-24 10:37:30 -07002413 deviceId = "6000".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002414 elif i == 7:
Jon Hallf37d44d2017-05-24 10:37:30 -07002415 deviceId = "6007".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002416 elif i >= 8 and i <= 17:
2417 dpid = '3' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002418 deviceId = dpid.zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002419 elif i >= 18 and i <= 27:
2420 dpid = '6' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002421 deviceId = dpid.zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002422 elif i == 28:
Jon Hallf37d44d2017-05-24 10:37:30 -07002423 deviceId = "2800".zfill( 16 )
Jon Hall6e709752016-02-01 13:38:46 -08002424 mappings[ macId ] = deviceId
2425 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2426 if hosts[ controller ] == []:
2427 main.log.warn( "There are no hosts discovered" )
2428 zeroHosts = True
2429 else:
2430 for host in hosts[ controller ]:
2431 mac = None
2432 location = None
2433 device = None
2434 port = None
2435 try:
2436 mac = host.get( 'mac' )
2437 assert mac, "mac field could not be found for this host object"
2438
Jeremy Ronquillo0e538bc2017-06-13 15:16:09 -07002439 location = host.get( 'locations' )[ 0 ]
Jon Hall6e709752016-02-01 13:38:46 -08002440 assert location, "location field could not be found for this host object"
2441
2442 # Trim the protocol identifier off deviceId
Jon Hallf37d44d2017-05-24 10:37:30 -07002443 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
Jon Hall6e709752016-02-01 13:38:46 -08002444 assert device, "elementId field could not be found for this host location object"
2445
2446 port = location.get( 'port' )
2447 assert port, "port field could not be found for this host location object"
2448
2449 # Now check if this matches where they should be
2450 if mac and device and port:
2451 if str( port ) != "1":
2452 main.log.error( "The attachment port is incorrect for " +
2453 "host " + str( mac ) +
Jon Hallf37d44d2017-05-24 10:37:30 -07002454 ". Expected: 1 Actual: " + str( port ) )
Jon Hall6e709752016-02-01 13:38:46 -08002455 hostAttachment = False
2456 if device != mappings[ str( mac ) ]:
2457 main.log.error( "The attachment device is incorrect for " +
2458 "host " + str( mac ) +
2459 ". Expected: " + mappings[ str( mac ) ] +
2460 " Actual: " + device )
2461 hostAttachment = False
2462 else:
2463 hostAttachment = False
2464 except AssertionError:
2465 main.log.exception( "Json object not as expected" )
2466 main.log.error( repr( host ) )
2467 hostAttachment = False
2468 else:
2469 main.log.error( "No hosts json output or \"Error\"" +
2470 " in output. hosts = " +
2471 repr( hosts[ controller ] ) )
2472 if zeroHosts is False:
2473 hostAttachment = True
2474
2475 # END CHECKING HOST ATTACHMENT POINTS
2476 devicesResults = devicesResults and currentDevicesResult
2477 linksResults = linksResults and currentLinksResult
2478 hostsResults = hostsResults and currentHostsResult
2479 hostAttachmentResults = hostAttachmentResults and\
2480 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002481 topoResult = ( devicesResults and linksResults
2482 and hostsResults and ipResult and
2483 hostAttachmentResults )
Jon Hall6e709752016-02-01 13:38:46 -08002484 utilities.assert_equals( expect=True,
2485 actual=topoResult,
2486 onpass="ONOS topology matches Mininet",
2487 onfail=topoFailMsg )
2488 # End of While loop to pull ONOS state
2489
2490 # Compare json objects for hosts and dataplane clusters
2491
2492 # hosts
2493 main.step( "Hosts view is consistent across all ONOS nodes" )
2494 consistentHostsResult = main.TRUE
2495 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002496 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002497 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2498 if hosts[ controller ] == hosts[ 0 ]:
2499 continue
2500 else: # hosts not consistent
2501 main.log.error( "hosts from ONOS" + controllerStr +
2502 " is inconsistent with ONOS1" )
2503 main.log.warn( repr( hosts[ controller ] ) )
2504 consistentHostsResult = main.FALSE
2505
2506 else:
2507 main.log.error( "Error in getting ONOS hosts from ONOS" +
2508 controllerStr )
2509 consistentHostsResult = main.FALSE
2510 main.log.warn( "ONOS" + controllerStr +
2511 " hosts response: " +
2512 repr( hosts[ controller ] ) )
2513 utilities.assert_equals(
2514 expect=main.TRUE,
2515 actual=consistentHostsResult,
2516 onpass="Hosts view is consistent across all ONOS nodes",
2517 onfail="ONOS nodes have different views of hosts" )
2518
2519 main.step( "Hosts information is correct" )
2520 hostsResults = hostsResults and ipResult
2521 utilities.assert_equals(
2522 expect=main.TRUE,
2523 actual=hostsResults,
2524 onpass="Host information is correct",
2525 onfail="Host information is incorrect" )
2526
2527 main.step( "Host attachment points to the network" )
2528 utilities.assert_equals(
2529 expect=True,
2530 actual=hostAttachmentResults,
2531 onpass="Hosts are correctly attached to the network",
2532 onfail="ONOS did not correctly attach hosts to the network" )
2533
2534 # Strongly connected clusters of devices
2535 main.step( "Clusters view is consistent across all ONOS nodes" )
2536 consistentClustersResult = main.TRUE
2537 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002538 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall6e709752016-02-01 13:38:46 -08002539 if "Error" not in clusters[ controller ]:
2540 if clusters[ controller ] == clusters[ 0 ]:
2541 continue
2542 else: # clusters not consistent
2543 main.log.error( "clusters from ONOS" +
2544 controllerStr +
2545 " is inconsistent with ONOS1" )
2546 consistentClustersResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002547 else:
2548 main.log.error( "Error in getting dataplane clusters " +
2549 "from ONOS" + controllerStr )
2550 consistentClustersResult = main.FALSE
2551 main.log.warn( "ONOS" + controllerStr +
2552 " clusters response: " +
2553 repr( clusters[ controller ] ) )
2554 utilities.assert_equals(
2555 expect=main.TRUE,
2556 actual=consistentClustersResult,
2557 onpass="Clusters view is consistent across all ONOS nodes",
2558 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002559 if not consistentClustersResult:
2560 main.log.debug( clusters )
Jon Hall6e709752016-02-01 13:38:46 -08002561
2562 main.step( "There is only one SCC" )
2563 # there should always only be one cluster
2564 try:
2565 numClusters = len( json.loads( clusters[ 0 ] ) )
2566 except ( ValueError, TypeError ):
2567 main.log.exception( "Error parsing clusters[0]: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07002568 repr( clusters[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -07002569 numClusters = "ERROR"
Jon Hall6e709752016-02-01 13:38:46 -08002570 clusterResults = main.FALSE
2571 if numClusters == 1:
2572 clusterResults = main.TRUE
2573 utilities.assert_equals(
2574 expect=1,
2575 actual=numClusters,
2576 onpass="ONOS shows 1 SCC",
2577 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2578
2579 topoResult = ( devicesResults and linksResults
2580 and hostsResults and consistentHostsResult
2581 and consistentClustersResult and clusterResults
2582 and ipResult and hostAttachmentResults )
2583
2584 topoResult = topoResult and int( count <= 2 )
2585 note = "note it takes about " + str( int( cliTime ) ) + \
2586 " seconds for the test to make all the cli calls to fetch " +\
2587 "the topology from each ONOS instance"
2588 main.log.info(
2589 "Very crass estimate for topology discovery/convergence( " +
2590 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2591 str( count ) + " tries" )
2592
2593 main.step( "Device information is correct" )
2594 utilities.assert_equals(
2595 expect=main.TRUE,
2596 actual=devicesResults,
2597 onpass="Device information is correct",
2598 onfail="Device information is incorrect" )
2599
2600 main.step( "Links are correct" )
2601 utilities.assert_equals(
2602 expect=main.TRUE,
2603 actual=linksResults,
2604 onpass="Link are correct",
2605 onfail="Links are incorrect" )
2606
Jon Halla440e872016-03-31 15:15:50 -07002607 main.step( "Hosts are correct" )
2608 utilities.assert_equals(
2609 expect=main.TRUE,
2610 actual=hostsResults,
2611 onpass="Hosts are correct",
2612 onfail="Hosts are incorrect" )
2613
Jon Hall6e709752016-02-01 13:38:46 -08002614 # FIXME: move this to an ONOS state case
2615 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002616 nodeResults = utilities.retry( main.HA.nodesCheck,
2617 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07002618 args=[ main.activeNodes ],
Jon Hall41d39f12016-04-11 22:54:35 -07002619 attempts=5 )
Jon Hall6e709752016-02-01 13:38:46 -08002620
Jon Hall41d39f12016-04-11 22:54:35 -07002621 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall6e709752016-02-01 13:38:46 -08002622 onpass="Nodes check successful",
2623 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002624 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002625 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002626 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallf37d44d2017-05-24 10:37:30 -07002627 main.CLIs[ i ].name,
2628 main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002629
Jon Halld2871c22016-07-26 11:01:14 -07002630 if not topoResult:
2631 main.cleanup()
2632 main.exit()
2633
Jon Hall6e709752016-02-01 13:38:46 -08002634 def CASE9( self, main ):
2635 """
2636 Link s3-s28 down
2637 """
2638 import time
2639 assert main.numCtrls, "main.numCtrls not defined"
2640 assert main, "main not defined"
2641 assert utilities.assert_equals, "utilities.assert_equals not defined"
2642 assert main.CLIs, "main.CLIs not defined"
2643 assert main.nodes, "main.nodes not defined"
2644 # NOTE: You should probably run a topology check after this
2645
2646 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2647
2648 description = "Turn off a link to ensure that Link Discovery " +\
2649 "is working properly"
2650 main.case( description )
2651
2652 main.step( "Kill Link between s3 and s28" )
2653 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2654 main.log.info( "Waiting " + str( linkSleep ) +
2655 " seconds for link down to be discovered" )
2656 time.sleep( linkSleep )
2657 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2658 onpass="Link down successful",
2659 onfail="Failed to bring link down" )
2660 # TODO do some sort of check here
2661
2662 def CASE10( self, main ):
2663 """
2664 Link s3-s28 up
2665 """
2666 import time
2667 assert main.numCtrls, "main.numCtrls not defined"
2668 assert main, "main not defined"
2669 assert utilities.assert_equals, "utilities.assert_equals not defined"
2670 assert main.CLIs, "main.CLIs not defined"
2671 assert main.nodes, "main.nodes not defined"
2672 # NOTE: You should probably run a topology check after this
2673
2674 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2675
2676 description = "Restore a link to ensure that Link Discovery is " + \
2677 "working properly"
2678 main.case( description )
2679
2680 main.step( "Bring link between s3 and s28 back up" )
2681 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2682 main.log.info( "Waiting " + str( linkSleep ) +
2683 " seconds for link up to be discovered" )
2684 time.sleep( linkSleep )
2685 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2686 onpass="Link up successful",
2687 onfail="Failed to bring link up" )
2688 # TODO do some sort of check here
2689
2690 def CASE11( self, main ):
2691 """
2692 Switch Down
2693 """
2694 # NOTE: You should probably run a topology check after this
2695 import time
2696 assert main.numCtrls, "main.numCtrls not defined"
2697 assert main, "main not defined"
2698 assert utilities.assert_equals, "utilities.assert_equals not defined"
2699 assert main.CLIs, "main.CLIs not defined"
2700 assert main.nodes, "main.nodes not defined"
2701
2702 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2703
2704 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallf37d44d2017-05-24 10:37:30 -07002705 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -08002706 main.case( description )
2707 switch = main.params[ 'kill' ][ 'switch' ]
2708 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2709
2710 # TODO: Make this switch parameterizable
2711 main.step( "Kill " + switch )
2712 main.log.info( "Deleting " + switch )
2713 main.Mininet1.delSwitch( switch )
2714 main.log.info( "Waiting " + str( switchSleep ) +
2715 " seconds for switch down to be discovered" )
2716 time.sleep( switchSleep )
2717 device = onosCli.getDevice( dpid=switchDPID )
2718 # Peek at the deleted switch
2719 main.log.warn( str( device ) )
2720 result = main.FALSE
2721 if device and device[ 'available' ] is False:
2722 result = main.TRUE
2723 utilities.assert_equals( expect=main.TRUE, actual=result,
2724 onpass="Kill switch successful",
2725 onfail="Failed to kill switch?" )
2726
2727 def CASE12( self, main ):
2728 """
2729 Switch Up
2730 """
2731 # NOTE: You should probably run a topology check after this
2732 import time
2733 assert main.numCtrls, "main.numCtrls not defined"
2734 assert main, "main not defined"
2735 assert utilities.assert_equals, "utilities.assert_equals not defined"
2736 assert main.CLIs, "main.CLIs not defined"
2737 assert main.nodes, "main.nodes not defined"
2738 assert ONOS1Port, "ONOS1Port not defined"
2739 assert ONOS2Port, "ONOS2Port not defined"
2740 assert ONOS3Port, "ONOS3Port not defined"
2741 assert ONOS4Port, "ONOS4Port not defined"
2742 assert ONOS5Port, "ONOS5Port not defined"
2743 assert ONOS6Port, "ONOS6Port not defined"
2744 assert ONOS7Port, "ONOS7Port not defined"
2745
2746 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2747 switch = main.params[ 'kill' ][ 'switch' ]
2748 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2749 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallf37d44d2017-05-24 10:37:30 -07002750 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -08002751 description = "Adding a switch to ensure it is discovered correctly"
2752 main.case( description )
2753
2754 main.step( "Add back " + switch )
2755 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2756 for peer in links:
2757 main.Mininet1.addLink( switch, peer )
2758 ipList = [ node.ip_address for node in main.nodes ]
2759 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2760 main.log.info( "Waiting " + str( switchSleep ) +
2761 " seconds for switch up to be discovered" )
2762 time.sleep( switchSleep )
2763 device = onosCli.getDevice( dpid=switchDPID )
2764 # Peek at the deleted switch
2765 main.log.warn( str( device ) )
2766 result = main.FALSE
2767 if device and device[ 'available' ]:
2768 result = main.TRUE
2769 utilities.assert_equals( expect=main.TRUE, actual=result,
2770 onpass="add switch successful",
2771 onfail="Failed to add switch?" )
2772
2773 def CASE13( self, main ):
2774 """
2775 Clean up
2776 """
2777 import os
2778 import time
2779 assert main.numCtrls, "main.numCtrls not defined"
2780 assert main, "main not defined"
2781 assert utilities.assert_equals, "utilities.assert_equals not defined"
2782 assert main.CLIs, "main.CLIs not defined"
2783 assert main.nodes, "main.nodes not defined"
2784
2785 # printing colors to terminal
2786 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2787 'blue': '\033[94m', 'green': '\033[92m',
2788 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2789 main.case( "Test Cleanup" )
2790 main.step( "Killing tcpdumps" )
2791 main.Mininet2.stopTcpdump()
2792
2793 testname = main.TEST
2794 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2795 main.step( "Copying MN pcap and ONOS log files to test station" )
2796 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2797 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2798 # NOTE: MN Pcap file is being saved to logdir.
2799 # We scp this file as MN and TestON aren't necessarily the same vm
2800
2801 # FIXME: To be replaced with a Jenkin's post script
2802 # TODO: Load these from params
2803 # NOTE: must end in /
2804 logFolder = "/opt/onos/log/"
2805 logFiles = [ "karaf.log", "karaf.log.1" ]
2806 # NOTE: must end in /
2807 for f in logFiles:
2808 for node in main.nodes:
2809 dstName = main.logdir + "/" + node.name + "-" + f
2810 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2811 logFolder + f, dstName )
2812 # std*.log's
2813 # NOTE: must end in /
2814 logFolder = "/opt/onos/var/"
2815 logFiles = [ "stderr.log", "stdout.log" ]
2816 # NOTE: must end in /
2817 for f in logFiles:
2818 for node in main.nodes:
2819 dstName = main.logdir + "/" + node.name + "-" + f
2820 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2821 logFolder + f, dstName )
2822 else:
2823 main.log.debug( "skipping saving log files" )
2824
2825 main.step( "Stopping Mininet" )
2826 mnResult = main.Mininet1.stopNet()
2827 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2828 onpass="Mininet stopped",
2829 onfail="MN cleanup NOT successful" )
2830
2831 main.step( "Checking ONOS Logs for errors" )
2832 for node in main.nodes:
2833 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2834 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2835
2836 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07002837 timerLog = open( main.logdir + "/Timers.csv", 'w' )
Jon Hall6e709752016-02-01 13:38:46 -08002838 # Overwrite with empty line and close
2839 labels = "Gossip Intents"
2840 data = str( gossipTime )
2841 timerLog.write( labels + "\n" + data )
2842 timerLog.close()
Jon Hallf37d44d2017-05-24 10:37:30 -07002843 except NameError as e:
2844 main.log.exception( e )
Jon Hall6e709752016-02-01 13:38:46 -08002845
2846 def CASE14( self, main ):
2847 """
2848 start election app on all onos nodes
2849 """
2850 assert main.numCtrls, "main.numCtrls not defined"
2851 assert main, "main not defined"
2852 assert utilities.assert_equals, "utilities.assert_equals not defined"
2853 assert main.CLIs, "main.CLIs not defined"
2854 assert main.nodes, "main.nodes not defined"
2855
Jon Hallf37d44d2017-05-24 10:37:30 -07002856 main.case( "Start Leadership Election app" )
Jon Hall6e709752016-02-01 13:38:46 -08002857 main.step( "Install leadership election app" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002858 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall6e709752016-02-01 13:38:46 -08002859 appResult = onosCli.activateApp( "org.onosproject.election" )
2860 utilities.assert_equals(
2861 expect=main.TRUE,
2862 actual=appResult,
2863 onpass="Election app installed",
2864 onfail="Something went wrong with installing Leadership election" )
2865
2866 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002867 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002868 main.CLIs[ i ].electionTestRun()
2869 time.sleep( 5 )
2870 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
Jon Hall25463a82016-04-13 14:03:52 -07002871 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002872 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002873 expect=True,
2874 actual=sameResult,
2875 onpass="All nodes see the same leaderboards",
2876 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002877
Jon Hall25463a82016-04-13 14:03:52 -07002878 if sameResult:
2879 leader = leaders[ 0 ][ 0 ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002880 if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
Jon Hall25463a82016-04-13 14:03:52 -07002881 correctLeader = True
2882 else:
2883 correctLeader = False
2884 main.step( "First node was elected leader" )
2885 utilities.assert_equals(
2886 expect=True,
2887 actual=correctLeader,
2888 onpass="Correct leader was elected",
2889 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002890
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
            withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All nodes agree; the first entry of the board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means no matching node found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates on the old board to know who was next in line
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3064
3065 def CASE16( self, main ):
3066 """
3067 Install Distributed Primitives app
3068 """
3069 import time
3070 assert main.numCtrls, "main.numCtrls not defined"
3071 assert main, "main not defined"
3072 assert utilities.assert_equals, "utilities.assert_equals not defined"
3073 assert main.CLIs, "main.CLIs not defined"
3074 assert main.nodes, "main.nodes not defined"
3075
3076 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003077 main.pCounterName = "TestON-Partitions"
3078 main.pCounterValue = 0
Jon Hallf37d44d2017-05-24 10:37:30 -07003079 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003080 main.onosSetName = "TestON-set"
Jon Hall6e709752016-02-01 13:38:46 -08003081
3082 description = "Install Primitives app"
3083 main.case( description )
3084 main.step( "Install Primitives app" )
3085 appName = "org.onosproject.distributedprimitives"
Jon Hallf37d44d2017-05-24 10:37:30 -07003086 node = main.activeNodes[ 0 ]
3087 appResults = main.CLIs[ node ].activateApp( appName )
Jon Hall6e709752016-02-01 13:38:46 -08003088 utilities.assert_equals( expect=main.TRUE,
3089 actual=appResults,
3090 onpass="Primitives app activated",
3091 onfail="Primitives app not activated" )
3092 time.sleep( 5 ) # To allow all nodes to activate
3093
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives
        """
        # Delegates to the shared HA helper so all HA suites run one common
        # implementation of the distributed-primitives checks
        main.HA.CASE17( main )