blob: 276bcdf626eef19207a9dbd4ae400c38d53df2e3 [file] [log] [blame]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic scaling of the cluster size.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: The scaling case.
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
Jon Hall9ebd1bd2016-04-19 01:37:17 -070025class HAscaling:
26
27 def __init__( self ):
28 self.default = ''
29
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump

        This scaling variant additionally starts a SimpleHTTPServer that
        serves a generated cluster metadata file, and patches the
        onos-service script so each node fetches its cluster membership
        from that URL (this is what allows later cases to scale the
        cluster by regenerating the file).
        """
        import time
        import os
        import re
        main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        # PULLCODE controls whether we rebuild ONOS from source this run
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Requested cluster size, capped by how many nodes the bench knows about
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        try:
            # Shared HA helpers plus the metadata-file HTTP server for this test
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
            from tests.HA.HAscaling.dependencies.Server import Server
            main.Server = Server()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect the CLI and node components TestON created for us; stop at
        # the first missing ONOS<i> component instead of failing.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )

        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                 onpass="Set cell successfull",
                                 onfail="Failled to set cell" )

        main.step( "Verify connectivity to cell" )
        verifyResult = main.ONOSbench.verifyCell()
        utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
                                 onpass="Verify cell passed",
                                 onfail="Failled to verify cell" )

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        # Serve <testdir>/dependencies over HTTP so ONOS nodes can download
        # the cluster metadata file generated below.
        main.step( "Setup server for cluster metadata file" )
        port = main.params[ 'server' ][ 'port' ]
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=port,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )

        # A scale entry like "3e" means 3 nodes with equal=True (presumably
        # equal partition sizes -- confirm in Server.generateFile); the
        # leading integer is the cluster size for this stage.
        main.step( "Generate initial metadata file" )
        main.scaling = main.params[ 'scaling' ].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop( 0 )
        main.log.debug( scale )
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal )
        main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAscaling"
        plotName = "Plot-HA"
        index = "1"
        # Confluence wiki markup embedding the Jenkins plot for this job
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        # Back up onos-service before patching it; restored after install below.
        main.step( "Copying backup config files" )
        path = "~/onos/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 path,
                                 path + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Slashes are escaped because this string is substituted inside a sed
        # s/// expression below.
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject an export of JAVA_OPTS right after the shebang's "bash" so
        # every node fetches cluster metadata from our HTTP server.
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # Consume the echoed command up to the metadata filename, then the
        # shell prompt, so the pexpect buffer stays in sync.
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )
        if not packageResult:
            main.cleanup()
            main.exit()

        # Install on every bench node; nodes beyond the current scale are
        # installed but not started ("-nf") so they can join later.
        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for i in range( main.ONOSbench.maxNodes ):
            node = main.nodes[ i ]
            options = "-f"
            if i >= main.numCtrls:
                options = "-nf"  # Don't start more than the current scale
            tmpResult = main.ONOSbench.onosInstall( options=options,
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            path + ".backup",
                            path,
                            direction="to" )

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for i in range( main.numCtrls ):
            node = main.nodes[ i ]
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        # Up to two passes over all running nodes waiting for startup.
        # NOTE(review): the inner loop reuses 'i', shadowing the retry
        # counter -- harmless here since the outer 'i' is never read.
        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for i in range( main.numCtrls ):
                node = main.nodes[ i ]
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # Start one CLI session per node in parallel threads.
        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[ main.nodes[ i ].ip_address ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump the non-ACTIVE OSGi components per node to aid debugging,
            # then abort the whole test run.
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # params nest as <component><setting>value</setting></component>
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        # Verify app name <-> id mappings agree across all active nodes.
        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
408
409 def CASE2( self, main ):
410 """
411 Assign devices to controllers
412 """
413 import re
414 assert main.numCtrls, "main.numCtrls not defined"
415 assert main, "main not defined"
416 assert utilities.assert_equals, "utilities.assert_equals not defined"
417 assert main.CLIs, "main.CLIs not defined"
418 assert main.nodes, "main.nodes not defined"
419
420 main.case( "Assigning devices to controllers" )
421 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
422 "and check that an ONOS node becomes the " +\
423 "master of the device."
424 main.step( "Assign switches to controllers" )
425
426 ipList = []
427 for i in range( main.ONOSbench.maxNodes ):
428 ipList.append( main.nodes[ i ].ip_address )
429 swList = []
430 for i in range( 1, 29 ):
431 swList.append( "s" + str( i ) )
432 main.Mininet1.assignSwController( sw=swList, ip=ipList )
433
434 mastershipCheck = main.TRUE
435 for i in range( 1, 29 ):
436 response = main.Mininet1.getSwController( "s" + str( i ) )
437 try:
438 main.log.info( str( response ) )
439 except Exception:
440 main.log.info( repr( response ) )
441 for node in main.nodes:
442 if re.search( "tcp:" + node.ip_address, response ):
443 mastershipCheck = mastershipCheck and main.TRUE
444 else:
445 main.log.error( "Error, node " + node.ip_address + " is " +
446 "not in the list of controllers s" +
447 str( i ) + " is connecting to." )
448 mastershipCheck = main.FALSE
449 utilities.assert_equals(
450 expect=main.TRUE,
451 actual=mastershipCheck,
452 onpass="Switch mastership assigned correctly",
453 onfail="Switches not assigned correctly to controllers" )
454
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually assigns each of the 28 obelisk switches to a specific
        controller with 'device-role', then re-reads each device's master
        after a settling delay and verifies the assignment took effect.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        # Parallel lists: ipList[ k ] is the intended master of deviceList[ k ]
        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster: the '% main.numCtrls' wraps the intended controller
            # index into the currently running set of nodes.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = its address, deviceId = the ONOS
                # device id looked up from the switch's (partial) dpid.
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    # switches s8-s17 map to dpids 3008-3017
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    # switches s18-s27 map to dpids 6018-6027
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                # A None/empty deviceId raises AssertionError, caught below.
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # AttributeError covers getDevice() returning None (.get on None)
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment: the recorded master must match the intended ip
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
569
570 def CASE3( self, main ):
571 """
572 Assign intents
573 """
574 import time
575 import json
576 assert main.numCtrls, "main.numCtrls not defined"
577 assert main, "main not defined"
578 assert utilities.assert_equals, "utilities.assert_equals not defined"
579 assert main.CLIs, "main.CLIs not defined"
580 assert main.nodes, "main.nodes not defined"
581 try:
582 labels
583 except NameError:
584 main.log.error( "labels not defined, setting to []" )
585 labels = []
586 try:
587 data
588 except NameError:
589 main.log.error( "data not defined, setting to []" )
590 data = []
591 # NOTE: we must reinstall intents until we have a persistant intent
592 # datastore!
593 main.case( "Adding host Intents" )
594 main.caseExplanation = "Discover hosts by using pingall then " +\
595 "assign predetermined host-to-host intents." +\
596 " After installation, check that the intent" +\
597 " is distributed to all nodes and the state" +\
598 " is INSTALLED"
599
600 # install onos-app-fwd
601 main.step( "Install reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700602 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700603 installResults = onosCli.activateApp( "org.onosproject.fwd" )
604 utilities.assert_equals( expect=main.TRUE, actual=installResults,
605 onpass="Install fwd successful",
606 onfail="Install fwd failed" )
607
608 main.step( "Check app ids" )
609 appCheck = main.TRUE
610 threads = []
611 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700612 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700613 name="appToIDCheck-" + str( i ),
614 args=[] )
615 threads.append( t )
616 t.start()
617
618 for t in threads:
619 t.join()
620 appCheck = appCheck and t.result
621 if appCheck != main.TRUE:
622 main.log.warn( onosCli.apps() )
623 main.log.warn( onosCli.appIDs() )
624 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
625 onpass="App Ids seem to be correct",
626 onfail="Something is wrong with app Ids" )
627
628 main.step( "Discovering Hosts( Via pingall for now )" )
629 # FIXME: Once we have a host discovery mechanism, use that instead
630 # REACTIVE FWD test
631 pingResult = main.FALSE
632 passMsg = "Reactive Pingall test passed"
633 time1 = time.time()
634 pingResult = main.Mininet1.pingall()
635 time2 = time.time()
636 if not pingResult:
Jon Hallf37d44d2017-05-24 10:37:30 -0700637 main.log.warn( "First pingall failed. Trying again..." )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700638 pingResult = main.Mininet1.pingall()
639 passMsg += " on the second try"
640 utilities.assert_equals(
641 expect=main.TRUE,
642 actual=pingResult,
Jon Hallf37d44d2017-05-24 10:37:30 -0700643 onpass=passMsg,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700644 onfail="Reactive Pingall failed, " +
645 "one or more ping pairs failed" )
646 main.log.info( "Time for pingall: %2f seconds" %
647 ( time2 - time1 ) )
648 # timeout for fwd flows
649 time.sleep( 11 )
650 # uninstall onos-app-fwd
651 main.step( "Uninstall reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700652 node = main.activeNodes[ 0 ]
653 uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700654 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
655 onpass="Uninstall fwd successful",
656 onfail="Uninstall fwd failed" )
657
658 main.step( "Check app ids" )
659 threads = []
660 appCheck2 = main.TRUE
661 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700662 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700663 name="appToIDCheck-" + str( i ),
664 args=[] )
665 threads.append( t )
666 t.start()
667
668 for t in threads:
669 t.join()
670 appCheck2 = appCheck2 and t.result
671 if appCheck2 != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700672 node = main.activeNodes[ 0 ]
673 main.log.warn( main.CLIs[ node ].apps() )
674 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700675 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
676 onpass="App Ids seem to be correct",
677 onfail="Something is wrong with app Ids" )
678
679 main.step( "Add host intents via cli" )
680 intentIds = []
681 # TODO: move the host numbers to params
682 # Maybe look at all the paths we ping?
683 intentAddResult = True
684 hostResult = main.TRUE
685 for i in range( 8, 18 ):
686 main.log.info( "Adding host intent between h" + str( i ) +
687 " and h" + str( i + 10 ) )
688 host1 = "00:00:00:00:00:" + \
689 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
690 host2 = "00:00:00:00:00:" + \
691 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
692 # NOTE: getHost can return None
693 host1Dict = onosCli.getHost( host1 )
694 host2Dict = onosCli.getHost( host2 )
695 host1Id = None
696 host2Id = None
697 if host1Dict and host2Dict:
698 host1Id = host1Dict.get( 'id', None )
699 host2Id = host2Dict.get( 'id', None )
700 if host1Id and host2Id:
701 nodeNum = ( i % len( main.activeNodes ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700702 node = main.activeNodes[ nodeNum ]
703 tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700704 if tmpId:
705 main.log.info( "Added intent with id: " + tmpId )
706 intentIds.append( tmpId )
707 else:
708 main.log.error( "addHostIntent returned: " +
709 repr( tmpId ) )
710 else:
711 main.log.error( "Error, getHost() failed for h" + str( i ) +
712 " and/or h" + str( i + 10 ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700713 node = main.activeNodes[ 0 ]
714 hosts = main.CLIs[ node ].hosts()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700715 main.log.warn( "Hosts output: " )
716 try:
717 main.log.warn( json.dumps( json.loads( hosts ),
718 sort_keys=True,
719 indent=4,
720 separators=( ',', ': ' ) ) )
721 except ( ValueError, TypeError ):
722 main.log.warn( repr( hosts ) )
723 hostResult = main.FALSE
724 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
725 onpass="Found a host id for each host",
726 onfail="Error looking up host ids" )
727
728 intentStart = time.time()
729 onosIds = onosCli.getAllIntentsId()
730 main.log.info( "Submitted intents: " + str( intentIds ) )
731 main.log.info( "Intents in ONOS: " + str( onosIds ) )
732 for intent in intentIds:
733 if intent in onosIds:
734 pass # intent submitted is in onos
735 else:
736 intentAddResult = False
737 if intentAddResult:
738 intentStop = time.time()
739 else:
740 intentStop = None
741 # Print the intent states
742 intents = onosCli.intents()
743 intentStates = []
744 installedCheck = True
745 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
746 count = 0
747 try:
748 for intent in json.loads( intents ):
749 state = intent.get( 'state', None )
750 if "INSTALLED" not in state:
751 installedCheck = False
752 intentId = intent.get( 'id', None )
753 intentStates.append( ( intentId, state ) )
754 except ( ValueError, TypeError ):
755 main.log.exception( "Error parsing intents" )
756 # add submitted intents not in the store
757 tmplist = [ i for i, s in intentStates ]
758 missingIntents = False
759 for i in intentIds:
760 if i not in tmplist:
761 intentStates.append( ( i, " - " ) )
762 missingIntents = True
763 intentStates.sort()
764 for i, s in intentStates:
765 count += 1
766 main.log.info( "%-6s%-15s%-15s" %
767 ( str( count ), str( i ), str( s ) ) )
768 leaders = onosCli.leaders()
769 try:
770 missing = False
771 if leaders:
772 parsedLeaders = json.loads( leaders )
773 main.log.warn( json.dumps( parsedLeaders,
774 sort_keys=True,
775 indent=4,
776 separators=( ',', ': ' ) ) )
777 # check for all intent partitions
778 topics = []
779 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700780 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700781 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700782 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700783 for topic in topics:
784 if topic not in ONOStopics:
785 main.log.error( "Error: " + topic +
786 " not in leaders" )
787 missing = True
788 else:
789 main.log.error( "leaders() returned None" )
790 except ( ValueError, TypeError ):
791 main.log.exception( "Error parsing leaders" )
792 main.log.error( repr( leaders ) )
793 # Check all nodes
794 if missing:
795 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700796 response = main.CLIs[ i ].leaders( jsonFormat=False )
797 main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700798 str( response ) )
799
800 partitions = onosCli.partitions()
801 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700802 if partitions:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700803 parsedPartitions = json.loads( partitions )
804 main.log.warn( json.dumps( parsedPartitions,
805 sort_keys=True,
806 indent=4,
807 separators=( ',', ': ' ) ) )
808 # TODO check for a leader in all paritions
809 # TODO check for consistency among nodes
810 else:
811 main.log.error( "partitions() returned None" )
812 except ( ValueError, TypeError ):
813 main.log.exception( "Error parsing partitions" )
814 main.log.error( repr( partitions ) )
815 pendingMap = onosCli.pendingMap()
816 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700817 if pendingMap:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700818 parsedPending = json.loads( pendingMap )
819 main.log.warn( json.dumps( parsedPending,
820 sort_keys=True,
821 indent=4,
822 separators=( ',', ': ' ) ) )
823 # TODO check something here?
824 else:
825 main.log.error( "pendingMap() returned None" )
826 except ( ValueError, TypeError ):
827 main.log.exception( "Error parsing pending map" )
828 main.log.error( repr( pendingMap ) )
829
830 intentAddResult = bool( intentAddResult and not missingIntents and
831 installedCheck )
832 if not intentAddResult:
833 main.log.error( "Error in pushing host intents to ONOS" )
834
835 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700836 for j in range( 100 ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700837 correct = True
838 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
839 for i in main.activeNodes:
840 onosIds = []
Jon Hallf37d44d2017-05-24 10:37:30 -0700841 ids = main.CLIs[ i ].getAllIntentsId()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700842 onosIds.append( ids )
Jon Hallf37d44d2017-05-24 10:37:30 -0700843 main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700844 str( sorted( onosIds ) ) )
845 if sorted( ids ) != sorted( intentIds ):
846 main.log.warn( "Set of intent IDs doesn't match" )
847 correct = False
848 break
849 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700850 intents = json.loads( main.CLIs[ i ].intents() )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700851 for intent in intents:
852 if intent[ 'state' ] != "INSTALLED":
853 main.log.warn( "Intent " + intent[ 'id' ] +
854 " is " + intent[ 'state' ] )
855 correct = False
856 break
857 if correct:
858 break
859 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700860 time.sleep( 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700861 if not intentStop:
862 intentStop = time.time()
863 global gossipTime
864 gossipTime = intentStop - intentStart
865 main.log.info( "It took about " + str( gossipTime ) +
866 " seconds for all intents to appear in each node" )
867 append = False
868 title = "Gossip Intents"
869 count = 1
870 while append is False:
871 curTitle = title + str( count )
872 if curTitle not in labels:
873 labels.append( curTitle )
874 data.append( str( gossipTime ) )
875 append = True
876 else:
877 count += 1
Jon Hallf37d44d2017-05-24 10:37:30 -0700878 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700879 maxGossipTime = gossipPeriod * len( main.activeNodes )
880 utilities.assert_greater_equals(
881 expect=maxGossipTime, actual=gossipTime,
882 onpass="ECM anti-entropy for intents worked within " +
883 "expected time",
884 onfail="Intent ECM anti-entropy took too long. " +
885 "Expected time:{}, Actual time:{}".format( maxGossipTime,
886 gossipTime ) )
887 if gossipTime <= maxGossipTime:
888 intentAddResult = True
889
890 if not intentAddResult or "key" in pendingMap:
891 import time
892 installedCheck = True
893 main.log.info( "Sleeping 60 seconds to see if intents are found" )
894 time.sleep( 60 )
895 onosIds = onosCli.getAllIntentsId()
896 main.log.info( "Submitted intents: " + str( intentIds ) )
897 main.log.info( "Intents in ONOS: " + str( onosIds ) )
898 # Print the intent states
899 intents = onosCli.intents()
900 intentStates = []
901 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
902 count = 0
903 try:
904 for intent in json.loads( intents ):
905 # Iter through intents of a node
906 state = intent.get( 'state', None )
907 if "INSTALLED" not in state:
908 installedCheck = False
909 intentId = intent.get( 'id', None )
910 intentStates.append( ( intentId, state ) )
911 except ( ValueError, TypeError ):
912 main.log.exception( "Error parsing intents" )
913 # add submitted intents not in the store
914 tmplist = [ i for i, s in intentStates ]
915 for i in intentIds:
916 if i not in tmplist:
917 intentStates.append( ( i, " - " ) )
918 intentStates.sort()
919 for i, s in intentStates:
920 count += 1
921 main.log.info( "%-6s%-15s%-15s" %
922 ( str( count ), str( i ), str( s ) ) )
923 leaders = onosCli.leaders()
924 try:
925 missing = False
926 if leaders:
927 parsedLeaders = json.loads( leaders )
928 main.log.warn( json.dumps( parsedLeaders,
929 sort_keys=True,
930 indent=4,
931 separators=( ',', ': ' ) ) )
932 # check for all intent partitions
933 # check for election
934 topics = []
935 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700936 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700937 # FIXME: this should only be after we start the app
938 topics.append( "org.onosproject.election" )
939 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700940 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700941 for topic in topics:
942 if topic not in ONOStopics:
943 main.log.error( "Error: " + topic +
944 " not in leaders" )
945 missing = True
946 else:
947 main.log.error( "leaders() returned None" )
948 except ( ValueError, TypeError ):
949 main.log.exception( "Error parsing leaders" )
950 main.log.error( repr( leaders ) )
951 # Check all nodes
952 if missing:
953 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700954 node = main.CLIs[ i ]
955 response = node.leaders( jsonFormat=False )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700956 main.log.warn( str( node.name ) + " leaders output: \n" +
957 str( response ) )
958
959 partitions = onosCli.partitions()
960 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700961 if partitions:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700962 parsedPartitions = json.loads( partitions )
963 main.log.warn( json.dumps( parsedPartitions,
964 sort_keys=True,
965 indent=4,
966 separators=( ',', ': ' ) ) )
967 # TODO check for a leader in all paritions
968 # TODO check for consistency among nodes
969 else:
970 main.log.error( "partitions() returned None" )
971 except ( ValueError, TypeError ):
972 main.log.exception( "Error parsing partitions" )
973 main.log.error( repr( partitions ) )
974 pendingMap = onosCli.pendingMap()
975 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700976 if pendingMap:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700977 parsedPending = json.loads( pendingMap )
978 main.log.warn( json.dumps( parsedPending,
979 sort_keys=True,
980 indent=4,
981 separators=( ',', ': ' ) ) )
982 # TODO check something here?
983 else:
984 main.log.error( "pendingMap() returned None" )
985 except ( ValueError, TypeError ):
986 main.log.exception( "Error parsing pending map" )
987 main.log.error( repr( pendingMap ) )
988
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies the host intents added in CASE3 actually provide
        connectivity:
          1. Poll intent state (up to 40 tries, 1s apart) until every
             intent reports INSTALLED.
          2. Ping each host pair h<i> <-> h<i+10> for i in 8..17.
          3. Check leadership of the intent work-partition topics and dump
             leaders/partitions/pendingMap output for debugging.
          4. If step 1 never converged, wait 60 seconds, re-dump state,
             and ping again.

        Relies on TestON globals: main.CLIs, main.activeNodes,
        main.Mininet1, main.numCtrls, main.nodes, utilities.
        Results are reported through utilities.assert_equals; the method
        itself returns None.
        """
        import json
        import time
        # Sanity-check the TestON environment before doing any work
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All single-node queries below go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until all intents are INSTALLED, at most 40 times (~40s)
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        # At least one intent is not ready; keep polling
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs follow the intent layout: h8<->h18, h9<->h19, ... h17<->h27
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intent store to help diagnose which intents broke
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # Expect one leadership topic per intent work partition (0-13)
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps raw leaders output when the check PASSED;
        # presumably intended for cross-node comparison - confirm intent
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[ i ]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If the earlier poll never saw all intents INSTALLED, give the
        # cluster one more minute, re-dump all state, and ping again
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    # NOTE: unlike the earlier check, the election topic IS
                    # included here
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # On missing topics, dump every node's raw leaders output
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[ 0 ]
            main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            # Same h<i> <-> h<i+10> pairing as the first ping pass
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1270
1271 def CASE5( self, main ):
1272 """
1273 Reading state of ONOS
1274 """
1275 import json
1276 import time
1277 assert main.numCtrls, "main.numCtrls not defined"
1278 assert main, "main not defined"
1279 assert utilities.assert_equals, "utilities.assert_equals not defined"
1280 assert main.CLIs, "main.CLIs not defined"
1281 assert main.nodes, "main.nodes not defined"
1282
1283 main.case( "Setting up and gathering data for current state" )
1284 # The general idea for this test case is to pull the state of
1285 # ( intents,flows, topology,... ) from each ONOS node
1286 # We can then compare them with each other and also with past states
1287
1288 main.step( "Check that each switch has a master" )
1289 global mastershipState
1290 mastershipState = '[]'
1291
1292 # Assert that each device has a master
1293 rolesNotNull = main.TRUE
1294 threads = []
1295 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001296 t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001297 name="rolesNotNull-" + str( i ),
1298 args=[] )
1299 threads.append( t )
1300 t.start()
1301
1302 for t in threads:
1303 t.join()
1304 rolesNotNull = rolesNotNull and t.result
1305 utilities.assert_equals(
1306 expect=main.TRUE,
1307 actual=rolesNotNull,
1308 onpass="Each device has a master",
1309 onfail="Some devices don't have a master assigned" )
1310
1311 main.step( "Get the Mastership of each switch from each controller" )
1312 ONOSMastership = []
1313 consistentMastership = True
1314 rolesResults = True
1315 threads = []
1316 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001317 t = main.Thread( target=main.CLIs[ i ].roles,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001318 name="roles-" + str( i ),
1319 args=[] )
1320 threads.append( t )
1321 t.start()
1322
1323 for t in threads:
1324 t.join()
1325 ONOSMastership.append( t.result )
1326
1327 for i in range( len( ONOSMastership ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001328 node = str( main.activeNodes[ i ] + 1 )
1329 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001330 main.log.error( "Error in getting ONOS" + node + " roles" )
1331 main.log.warn( "ONOS" + node + " mastership response: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001332 repr( ONOSMastership[ i ] ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001333 rolesResults = False
1334 utilities.assert_equals(
1335 expect=True,
1336 actual=rolesResults,
1337 onpass="No error in reading roles output",
1338 onfail="Error in reading roles from ONOS" )
1339
1340 main.step( "Check for consistency in roles from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001341 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001342 main.log.info(
1343 "Switch roles are consistent across all ONOS nodes" )
1344 else:
1345 consistentMastership = False
1346 utilities.assert_equals(
1347 expect=True,
1348 actual=consistentMastership,
1349 onpass="Switch roles are consistent across all ONOS nodes",
1350 onfail="ONOS nodes have different views of switch roles" )
1351
1352 if rolesResults and not consistentMastership:
1353 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001354 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001355 try:
1356 main.log.warn(
1357 "ONOS" + node + " roles: ",
1358 json.dumps(
1359 json.loads( ONOSMastership[ i ] ),
1360 sort_keys=True,
1361 indent=4,
1362 separators=( ',', ': ' ) ) )
1363 except ( ValueError, TypeError ):
1364 main.log.warn( repr( ONOSMastership[ i ] ) )
1365 elif rolesResults and consistentMastership:
1366 mastershipState = ONOSMastership[ 0 ]
1367
1368 main.step( "Get the intents from each controller" )
1369 global intentState
1370 intentState = []
1371 ONOSIntents = []
1372 consistentIntents = True # Are Intents consistent across nodes?
1373 intentsResults = True # Could we read Intents from ONOS?
1374 threads = []
1375 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001376 t = main.Thread( target=main.CLIs[ i ].intents,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001377 name="intents-" + str( i ),
1378 args=[],
1379 kwargs={ 'jsonFormat': True } )
1380 threads.append( t )
1381 t.start()
1382
1383 for t in threads:
1384 t.join()
1385 ONOSIntents.append( t.result )
1386
1387 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001388 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001389 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1390 main.log.error( "Error in getting ONOS" + node + " intents" )
1391 main.log.warn( "ONOS" + node + " intents response: " +
1392 repr( ONOSIntents[ i ] ) )
1393 intentsResults = False
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=intentsResults,
1397 onpass="No error in reading intents output",
1398 onfail="Error in reading intents from ONOS" )
1399
1400 main.step( "Check for consistency in Intents from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001401 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001402 main.log.info( "Intents are consistent across all ONOS " +
1403 "nodes" )
1404 else:
1405 consistentIntents = False
1406 main.log.error( "Intents not consistent" )
1407 utilities.assert_equals(
1408 expect=True,
1409 actual=consistentIntents,
1410 onpass="Intents are consistent across all ONOS nodes",
1411 onfail="ONOS nodes have different views of intents" )
1412
1413 if intentsResults:
1414 # Try to make it easy to figure out what is happening
1415 #
1416 # Intent ONOS1 ONOS2 ...
1417 # 0x01 INSTALLED INSTALLING
1418 # ... ... ...
1419 # ... ... ...
1420 title = " Id"
1421 for n in main.activeNodes:
1422 title += " " * 10 + "ONOS" + str( n + 1 )
1423 main.log.warn( title )
1424 # get all intent keys in the cluster
1425 keys = []
1426 try:
1427 # Get the set of all intent keys
1428 for nodeStr in ONOSIntents:
1429 node = json.loads( nodeStr )
1430 for intent in node:
1431 keys.append( intent.get( 'id' ) )
1432 keys = set( keys )
1433 # For each intent key, print the state on each node
1434 for key in keys:
1435 row = "%-13s" % key
1436 for nodeStr in ONOSIntents:
1437 node = json.loads( nodeStr )
1438 for intent in node:
1439 if intent.get( 'id', "Error" ) == key:
1440 row += "%-15s" % intent.get( 'state' )
1441 main.log.warn( row )
1442 # End of intent state table
1443 except ValueError as e:
1444 main.log.exception( e )
1445 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1446
1447 if intentsResults and not consistentIntents:
1448 # print the json objects
Jon Hallf37d44d2017-05-24 10:37:30 -07001449 n = str( main.activeNodes[ -1 ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001450 main.log.debug( "ONOS" + n + " intents: " )
1451 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1452 sort_keys=True,
1453 indent=4,
1454 separators=( ',', ': ' ) ) )
1455 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001456 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001457 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1458 main.log.debug( "ONOS" + node + " intents: " )
Jon Hallf37d44d2017-05-24 10:37:30 -07001459 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001460 sort_keys=True,
1461 indent=4,
1462 separators=( ',', ': ' ) ) )
1463 else:
1464 main.log.debug( "ONOS" + node + " intents match ONOS" +
1465 n + " intents" )
1466 elif intentsResults and consistentIntents:
1467 intentState = ONOSIntents[ 0 ]
1468
1469 main.step( "Get the flows from each controller" )
1470 global flowState
1471 flowState = []
1472 ONOSFlows = []
1473 ONOSFlowsJson = []
1474 flowCheck = main.FALSE
1475 consistentFlows = True
1476 flowsResults = True
1477 threads = []
1478 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001479 t = main.Thread( target=main.CLIs[ i ].flows,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001480 name="flows-" + str( i ),
1481 args=[],
1482 kwargs={ 'jsonFormat': True } )
1483 threads.append( t )
1484 t.start()
1485
1486 # NOTE: Flows command can take some time to run
Jon Hallf37d44d2017-05-24 10:37:30 -07001487 time.sleep( 30 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001488 for t in threads:
1489 t.join()
1490 result = t.result
1491 ONOSFlows.append( result )
1492
1493 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001494 num = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001495 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1496 main.log.error( "Error in getting ONOS" + num + " flows" )
1497 main.log.warn( "ONOS" + num + " flows response: " +
1498 repr( ONOSFlows[ i ] ) )
1499 flowsResults = False
1500 ONOSFlowsJson.append( None )
1501 else:
1502 try:
1503 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1504 except ( ValueError, TypeError ):
1505 # FIXME: change this to log.error?
1506 main.log.exception( "Error in parsing ONOS" + num +
1507 " response as json." )
1508 main.log.error( repr( ONOSFlows[ i ] ) )
1509 ONOSFlowsJson.append( None )
1510 flowsResults = False
1511 utilities.assert_equals(
1512 expect=True,
1513 actual=flowsResults,
1514 onpass="No error in reading flows output",
1515 onfail="Error in reading flows from ONOS" )
1516
1517 main.step( "Check for consistency in Flows from each controller" )
1518 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1519 if all( tmp ):
1520 main.log.info( "Flow count is consistent across all ONOS nodes" )
1521 else:
1522 consistentFlows = False
1523 utilities.assert_equals(
1524 expect=True,
1525 actual=consistentFlows,
1526 onpass="The flow count is consistent across all ONOS nodes",
1527 onfail="ONOS nodes have different flow counts" )
1528
1529 if flowsResults and not consistentFlows:
1530 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001531 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001532 try:
1533 main.log.warn(
1534 "ONOS" + node + " flows: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001535 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001536 indent=4, separators=( ',', ': ' ) ) )
1537 except ( ValueError, TypeError ):
1538 main.log.warn( "ONOS" + node + " flows: " +
1539 repr( ONOSFlows[ i ] ) )
1540 elif flowsResults and consistentFlows:
1541 flowCheck = main.TRUE
1542 flowState = ONOSFlows[ 0 ]
1543
1544 main.step( "Get the OF Table entries" )
1545 global flows
1546 flows = []
1547 for i in range( 1, 29 ):
1548 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1549 if flowCheck == main.FALSE:
1550 for table in flows:
1551 main.log.warn( table )
1552 # TODO: Compare switch flow tables with ONOS flow tables
1553
1554 main.step( "Start continuous pings" )
1555 main.Mininet2.pingLong(
1556 src=main.params[ 'PING' ][ 'source1' ],
1557 target=main.params[ 'PING' ][ 'target1' ],
1558 pingTime=500 )
1559 main.Mininet2.pingLong(
1560 src=main.params[ 'PING' ][ 'source2' ],
1561 target=main.params[ 'PING' ][ 'target2' ],
1562 pingTime=500 )
1563 main.Mininet2.pingLong(
1564 src=main.params[ 'PING' ][ 'source3' ],
1565 target=main.params[ 'PING' ][ 'target3' ],
1566 pingTime=500 )
1567 main.Mininet2.pingLong(
1568 src=main.params[ 'PING' ][ 'source4' ],
1569 target=main.params[ 'PING' ][ 'target4' ],
1570 pingTime=500 )
1571 main.Mininet2.pingLong(
1572 src=main.params[ 'PING' ][ 'source5' ],
1573 target=main.params[ 'PING' ][ 'target5' ],
1574 pingTime=500 )
1575 main.Mininet2.pingLong(
1576 src=main.params[ 'PING' ][ 'source6' ],
1577 target=main.params[ 'PING' ][ 'target6' ],
1578 pingTime=500 )
1579 main.Mininet2.pingLong(
1580 src=main.params[ 'PING' ][ 'source7' ],
1581 target=main.params[ 'PING' ][ 'target7' ],
1582 pingTime=500 )
1583 main.Mininet2.pingLong(
1584 src=main.params[ 'PING' ][ 'source8' ],
1585 target=main.params[ 'PING' ][ 'target8' ],
1586 pingTime=500 )
1587 main.Mininet2.pingLong(
1588 src=main.params[ 'PING' ][ 'source9' ],
1589 target=main.params[ 'PING' ][ 'target9' ],
1590 pingTime=500 )
1591 main.Mininet2.pingLong(
1592 src=main.params[ 'PING' ][ 'source10' ],
1593 target=main.params[ 'PING' ][ 'target10' ],
1594 pingTime=500 )
1595
1596 main.step( "Collecting topology information from ONOS" )
1597 devices = []
1598 threads = []
1599 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001600 t = main.Thread( target=main.CLIs[ i ].devices,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001601 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001602 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001603 threads.append( t )
1604 t.start()
1605
1606 for t in threads:
1607 t.join()
1608 devices.append( t.result )
1609 hosts = []
1610 threads = []
1611 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001612 t = main.Thread( target=main.CLIs[ i ].hosts,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001613 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001614 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001615 threads.append( t )
1616 t.start()
1617
1618 for t in threads:
1619 t.join()
1620 try:
1621 hosts.append( json.loads( t.result ) )
1622 except ( ValueError, TypeError ):
1623 # FIXME: better handling of this, print which node
1624 # Maybe use thread name?
1625 main.log.exception( "Error parsing json output of hosts" )
1626 main.log.warn( repr( t.result ) )
1627 hosts.append( None )
1628
1629 ports = []
1630 threads = []
1631 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001632 t = main.Thread( target=main.CLIs[ i ].ports,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001633 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001634 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001635 threads.append( t )
1636 t.start()
1637
1638 for t in threads:
1639 t.join()
1640 ports.append( t.result )
1641 links = []
1642 threads = []
1643 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001644 t = main.Thread( target=main.CLIs[ i ].links,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001645 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001646 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001647 threads.append( t )
1648 t.start()
1649
1650 for t in threads:
1651 t.join()
1652 links.append( t.result )
1653 clusters = []
1654 threads = []
1655 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001656 t = main.Thread( target=main.CLIs[ i ].clusters,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001657 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001658 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001659 threads.append( t )
1660 t.start()
1661
1662 for t in threads:
1663 t.join()
1664 clusters.append( t.result )
1665 # Compare json objects for hosts and dataplane clusters
1666
1667 # hosts
1668 main.step( "Host view is consistent across ONOS nodes" )
1669 consistentHostsResult = main.TRUE
1670 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001671 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001672 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1673 if hosts[ controller ] == hosts[ 0 ]:
1674 continue
1675 else: # hosts not consistent
1676 main.log.error( "hosts from ONOS" +
1677 controllerStr +
1678 " is inconsistent with ONOS1" )
1679 main.log.warn( repr( hosts[ controller ] ) )
1680 consistentHostsResult = main.FALSE
1681
1682 else:
1683 main.log.error( "Error in getting ONOS hosts from ONOS" +
1684 controllerStr )
1685 consistentHostsResult = main.FALSE
1686 main.log.warn( "ONOS" + controllerStr +
1687 " hosts response: " +
1688 repr( hosts[ controller ] ) )
1689 utilities.assert_equals(
1690 expect=main.TRUE,
1691 actual=consistentHostsResult,
1692 onpass="Hosts view is consistent across all ONOS nodes",
1693 onfail="ONOS nodes have different views of hosts" )
1694
1695 main.step( "Each host has an IP address" )
1696 ipResult = main.TRUE
1697 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001698 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001699 if hosts[ controller ]:
1700 for host in hosts[ controller ]:
Jon Hallf37d44d2017-05-24 10:37:30 -07001701 if not host.get( 'ipAddresses', [] ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001702 main.log.error( "Error with host ips on controller" +
1703 controllerStr + ": " + str( host ) )
1704 ipResult = main.FALSE
1705 utilities.assert_equals(
1706 expect=main.TRUE,
1707 actual=ipResult,
1708 onpass="The ips of the hosts aren't empty",
1709 onfail="The ip of at least one host is missing" )
1710
1711 # Strongly connected clusters of devices
1712 main.step( "Cluster view is consistent across ONOS nodes" )
1713 consistentClustersResult = main.TRUE
1714 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001715 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001716 if "Error" not in clusters[ controller ]:
1717 if clusters[ controller ] == clusters[ 0 ]:
1718 continue
1719 else: # clusters not consistent
1720 main.log.error( "clusters from ONOS" + controllerStr +
1721 " is inconsistent with ONOS1" )
1722 consistentClustersResult = main.FALSE
1723
1724 else:
1725 main.log.error( "Error in getting dataplane clusters " +
1726 "from ONOS" + controllerStr )
1727 consistentClustersResult = main.FALSE
1728 main.log.warn( "ONOS" + controllerStr +
1729 " clusters response: " +
1730 repr( clusters[ controller ] ) )
1731 utilities.assert_equals(
1732 expect=main.TRUE,
1733 actual=consistentClustersResult,
1734 onpass="Clusters view is consistent across all ONOS nodes",
1735 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001736 if not consistentClustersResult:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001737 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001738
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001739 # there should always only be one cluster
1740 main.step( "Cluster view correct across ONOS nodes" )
1741 try:
1742 numClusters = len( json.loads( clusters[ 0 ] ) )
1743 except ( ValueError, TypeError ):
1744 main.log.exception( "Error parsing clusters[0]: " +
1745 repr( clusters[ 0 ] ) )
1746 numClusters = "ERROR"
1747 utilities.assert_equals(
1748 expect=1,
1749 actual=numClusters,
1750 onpass="ONOS shows 1 SCC",
1751 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1752
1753 main.step( "Comparing ONOS topology to MN" )
1754 devicesResults = main.TRUE
1755 linksResults = main.TRUE
1756 hostsResults = main.TRUE
1757 mnSwitches = main.Mininet1.getSwitches()
1758 mnLinks = main.Mininet1.getLinks()
1759 mnHosts = main.Mininet1.getHosts()
1760 for controller in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001761 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001762 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07001763 "Error" not in devices[ controller ] and\
1764 "Error" not in ports[ controller ]:
1765 currentDevicesResult = main.Mininet1.compareSwitches(
1766 mnSwitches,
1767 json.loads( devices[ controller ] ),
1768 json.loads( ports[ controller ] ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001769 else:
1770 currentDevicesResult = main.FALSE
1771 utilities.assert_equals( expect=main.TRUE,
1772 actual=currentDevicesResult,
1773 onpass="ONOS" + controllerStr +
1774 " Switches view is correct",
1775 onfail="ONOS" + controllerStr +
1776 " Switches view is incorrect" )
1777 if links[ controller ] and "Error" not in links[ controller ]:
1778 currentLinksResult = main.Mininet1.compareLinks(
1779 mnSwitches, mnLinks,
1780 json.loads( links[ controller ] ) )
1781 else:
1782 currentLinksResult = main.FALSE
1783 utilities.assert_equals( expect=main.TRUE,
1784 actual=currentLinksResult,
1785 onpass="ONOS" + controllerStr +
1786 " links view is correct",
1787 onfail="ONOS" + controllerStr +
1788 " links view is incorrect" )
1789
1790 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1791 currentHostsResult = main.Mininet1.compareHosts(
1792 mnHosts,
1793 hosts[ controller ] )
1794 else:
1795 currentHostsResult = main.FALSE
1796 utilities.assert_equals( expect=main.TRUE,
1797 actual=currentHostsResult,
1798 onpass="ONOS" + controllerStr +
1799 " hosts exist in Mininet",
1800 onfail="ONOS" + controllerStr +
1801 " hosts don't match Mininet" )
1802
1803 devicesResults = devicesResults and currentDevicesResult
1804 linksResults = linksResults and currentLinksResult
1805 hostsResults = hostsResults and currentHostsResult
1806
1807 main.step( "Device information is correct" )
1808 utilities.assert_equals(
1809 expect=main.TRUE,
1810 actual=devicesResults,
1811 onpass="Device information is correct",
1812 onfail="Device information is incorrect" )
1813
1814 main.step( "Links are correct" )
1815 utilities.assert_equals(
1816 expect=main.TRUE,
1817 actual=linksResults,
1818 onpass="Link are correct",
1819 onfail="Links are incorrect" )
1820
1821 main.step( "Hosts are correct" )
1822 utilities.assert_equals(
1823 expect=main.TRUE,
1824 actual=hostsResults,
1825 onpass="Hosts are correct",
1826 onfail="Hosts are incorrect" )
1827
1828 def CASE6( self, main ):
1829 """
1830 The Scaling case.
1831 """
1832 import time
1833 import re
1834 assert main.numCtrls, "main.numCtrls not defined"
1835 assert main, "main not defined"
1836 assert utilities.assert_equals, "utilities.assert_equals not defined"
1837 assert main.CLIs, "main.CLIs not defined"
1838 assert main.nodes, "main.nodes not defined"
1839 try:
1840 labels
1841 except NameError:
1842 main.log.error( "labels not defined, setting to []" )
1843 global labels
1844 labels = []
1845 try:
1846 data
1847 except NameError:
1848 main.log.error( "data not defined, setting to []" )
1849 global data
1850 data = []
1851
Jon Hall69b2b982016-05-11 12:04:59 -07001852 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001853
1854 main.step( "Checking ONOS Logs for errors" )
1855 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001856 node = main.nodes[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001857 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1858 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1859
1860 """
1861 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1862 modify cluster.json file appropriately
1863 install/deactivate node as needed
1864 """
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001865 try:
1866 prevNodes = main.activeNodes
Jon Hallf37d44d2017-05-24 10:37:30 -07001867 scale = main.scaling.pop( 0 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001868 if "e" in scale:
1869 equal = True
1870 else:
1871 equal = False
Jon Hallf37d44d2017-05-24 10:37:30 -07001872 main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001873 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1874 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1875 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1876 onpass="New cluster metadata file generated",
1877 onfail="Failled to generate new metadata file" )
1878 time.sleep( 5 ) # Give time for nodes to read new file
1879 except IndexError:
1880 main.cleanup()
1881 main.exit()
1882
1883 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1884 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1885
1886 main.step( "Start new nodes" ) # OR stop old nodes?
1887 started = main.TRUE
1888 for i in newNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001889 started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001890 utilities.assert_equals( expect=main.TRUE, actual=started,
1891 onpass="ONOS started",
1892 onfail="ONOS start NOT successful" )
1893
1894 main.step( "Checking if ONOS is up yet" )
1895 for i in range( 2 ):
1896 onosIsupResult = main.TRUE
1897 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001898 node = main.nodes[ i ]
Jon Hall168c1862017-01-31 17:35:34 -08001899 main.ONOSbench.onosSecureSSH( node=node.ip_address )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001900 started = main.ONOSbench.isup( node.ip_address )
1901 if not started:
1902 main.log.error( node.name + " didn't start!" )
1903 onosIsupResult = onosIsupResult and started
1904 if onosIsupResult == main.TRUE:
1905 break
1906 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1907 onpass="ONOS started",
1908 onfail="ONOS start NOT successful" )
1909
Jon Hall6509dbf2016-06-21 17:01:17 -07001910 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001911 cliResults = main.TRUE
1912 threads = []
1913 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001914 t = main.Thread( target=main.CLIs[ i ].startOnosCli,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001915 name="startOnosCli-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001916 args=[ main.nodes[ i ].ip_address ] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001917 threads.append( t )
1918 t.start()
1919
1920 for t in threads:
1921 t.join()
1922 cliResults = cliResults and t.result
1923 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1924 onpass="ONOS cli started",
1925 onfail="ONOS clis did not start" )
1926
1927 main.step( "Checking ONOS nodes" )
1928 nodeResults = utilities.retry( main.HA.nodesCheck,
1929 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07001930 args=[ main.activeNodes ],
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001931 attempts=5 )
1932 utilities.assert_equals( expect=True, actual=nodeResults,
1933 onpass="Nodes check successful",
1934 onfail="Nodes check NOT successful" )
1935
1936 for i in range( 10 ):
1937 ready = True
1938 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001939 cli = main.CLIs[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001940 output = cli.summary()
1941 if not output:
1942 ready = False
1943 if ready:
1944 break
1945 time.sleep( 30 )
1946 utilities.assert_equals( expect=True, actual=ready,
1947 onpass="ONOS summary command succeded",
1948 onfail="ONOS summary command failed" )
1949 if not ready:
1950 main.cleanup()
1951 main.exit()
1952
1953 # Rerun for election on new nodes
1954 runResults = main.TRUE
1955 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001956 cli = main.CLIs[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001957 run = cli.electionTestRun()
1958 if run != main.TRUE:
1959 main.log.error( "Error running for election on " + cli.name )
1960 runResults = runResults and run
1961 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1962 onpass="Reran for election",
1963 onfail="Failed to rerun for election" )
1964
1965 # TODO: Make this configurable
1966 time.sleep( 60 )
1967 for node in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001968 main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
1969 main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
1970 main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
1971 main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
1972 main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001973
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies, across all active nodes: every device has a master,
        device roles are readable and consistent, intents are readable,
        consistent, and unchanged since before the scaling event, switch
        flow tables are unchanged, and leadership election still has a
        single consistent leader.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query rolesNotNull on every active node in parallel
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's roles output must match the first node's exactly
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's (valid) roles for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare each node's intents (sorted) against the first node's
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one column per node's state for it
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            # (state, count) pairs for this node
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # intentState is a module global saved by an earlier case — if it was
        # never set, the NameError branch below just logs a warning.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not byte-equal: compare parsed intents
                # individually, ignoring ordering
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Dump before/after intents for debugging the difference
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # `flows` is a global saved by an earlier case — presumably the flow
        # tables captured before scaling; TODO confirm it covers all 28 switches
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        """
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        """
        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node must report the same, non-None, non-FALSE leader
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[ i ]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2281
2282 def CASE8( self, main ):
2283 """
2284 Compare topo
2285 """
2286 import json
2287 import time
2288 assert main.numCtrls, "main.numCtrls not defined"
2289 assert main, "main not defined"
2290 assert utilities.assert_equals, "utilities.assert_equals not defined"
2291 assert main.CLIs, "main.CLIs not defined"
2292 assert main.nodes, "main.nodes not defined"
2293
2294 main.case( "Compare ONOS Topology view to Mininet topology" )
2295 main.caseExplanation = "Compare topology objects between Mininet" +\
2296 " and ONOS"
2297 topoResult = main.FALSE
2298 topoFailMsg = "ONOS topology don't match Mininet"
2299 elapsed = 0
2300 count = 0
2301 main.step( "Comparing ONOS topology to MN topology" )
2302 startTime = time.time()
2303 # Give time for Gossip to work
2304 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2305 devicesResults = main.TRUE
2306 linksResults = main.TRUE
2307 hostsResults = main.TRUE
2308 hostAttachmentResults = True
2309 count += 1
2310 cliStart = time.time()
2311 devices = []
2312 threads = []
2313 for i in main.activeNodes:
2314 t = main.Thread( target=utilities.retry,
2315 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002316 args=[ main.CLIs[ i ].devices, [ None ] ],
2317 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002318 'randomTime': True } )
2319 threads.append( t )
2320 t.start()
2321
2322 for t in threads:
2323 t.join()
2324 devices.append( t.result )
2325 hosts = []
2326 ipResult = main.TRUE
2327 threads = []
2328 for i in main.activeNodes:
2329 t = main.Thread( target=utilities.retry,
2330 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002331 args=[ main.CLIs[ i ].hosts, [ None ] ],
2332 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002333 'randomTime': True } )
2334 threads.append( t )
2335 t.start()
2336
2337 for t in threads:
2338 t.join()
2339 try:
2340 hosts.append( json.loads( t.result ) )
2341 except ( ValueError, TypeError ):
2342 main.log.exception( "Error parsing hosts results" )
2343 main.log.error( repr( t.result ) )
2344 hosts.append( None )
2345 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002346 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002347 if hosts[ controller ]:
2348 for host in hosts[ controller ]:
2349 if host is None or host.get( 'ipAddresses', [] ) == []:
2350 main.log.error(
2351 "Error with host ipAddresses on controller" +
2352 controllerStr + ": " + str( host ) )
2353 ipResult = main.FALSE
2354 ports = []
2355 threads = []
2356 for i in main.activeNodes:
2357 t = main.Thread( target=utilities.retry,
2358 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002359 args=[ main.CLIs[ i ].ports, [ None ] ],
2360 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002361 'randomTime': True } )
2362 threads.append( t )
2363 t.start()
2364
2365 for t in threads:
2366 t.join()
2367 ports.append( t.result )
2368 links = []
2369 threads = []
2370 for i in main.activeNodes:
2371 t = main.Thread( target=utilities.retry,
2372 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002373 args=[ main.CLIs[ i ].links, [ None ] ],
2374 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002375 'randomTime': True } )
2376 threads.append( t )
2377 t.start()
2378
2379 for t in threads:
2380 t.join()
2381 links.append( t.result )
2382 clusters = []
2383 threads = []
2384 for i in main.activeNodes:
2385 t = main.Thread( target=utilities.retry,
2386 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002387 args=[ main.CLIs[ i ].clusters, [ None ] ],
2388 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002389 'randomTime': True } )
2390 threads.append( t )
2391 t.start()
2392
2393 for t in threads:
2394 t.join()
2395 clusters.append( t.result )
2396
2397 elapsed = time.time() - startTime
2398 cliTime = time.time() - cliStart
2399 print "Elapsed time: " + str( elapsed )
2400 print "CLI time: " + str( cliTime )
2401
2402 if all( e is None for e in devices ) and\
2403 all( e is None for e in hosts ) and\
2404 all( e is None for e in ports ) and\
2405 all( e is None for e in links ) and\
2406 all( e is None for e in clusters ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002407 topoFailMsg = "Could not get topology from ONOS"
2408 main.log.error( topoFailMsg )
2409 continue # Try again, No use trying to compare
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002410
2411 mnSwitches = main.Mininet1.getSwitches()
2412 mnLinks = main.Mininet1.getLinks()
2413 mnHosts = main.Mininet1.getHosts()
2414 for controller in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002415 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002416 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07002417 "Error" not in devices[ controller ] and\
2418 "Error" not in ports[ controller ]:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002419
2420 try:
2421 currentDevicesResult = main.Mininet1.compareSwitches(
2422 mnSwitches,
2423 json.loads( devices[ controller ] ),
2424 json.loads( ports[ controller ] ) )
2425 except ( TypeError, ValueError ):
2426 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2427 devices[ controller ], ports[ controller ] ) )
2428 else:
2429 currentDevicesResult = main.FALSE
2430 utilities.assert_equals( expect=main.TRUE,
2431 actual=currentDevicesResult,
2432 onpass="ONOS" + controllerStr +
2433 " Switches view is correct",
2434 onfail="ONOS" + controllerStr +
2435 " Switches view is incorrect" )
2436
2437 if links[ controller ] and "Error" not in links[ controller ]:
2438 currentLinksResult = main.Mininet1.compareLinks(
2439 mnSwitches, mnLinks,
2440 json.loads( links[ controller ] ) )
2441 else:
2442 currentLinksResult = main.FALSE
2443 utilities.assert_equals( expect=main.TRUE,
2444 actual=currentLinksResult,
2445 onpass="ONOS" + controllerStr +
2446 " links view is correct",
2447 onfail="ONOS" + controllerStr +
2448 " links view is incorrect" )
2449 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2450 currentHostsResult = main.Mininet1.compareHosts(
2451 mnHosts,
2452 hosts[ controller ] )
2453 elif hosts[ controller ] == []:
2454 currentHostsResult = main.TRUE
2455 else:
2456 currentHostsResult = main.FALSE
2457 utilities.assert_equals( expect=main.TRUE,
2458 actual=currentHostsResult,
2459 onpass="ONOS" + controllerStr +
2460 " hosts exist in Mininet",
2461 onfail="ONOS" + controllerStr +
2462 " hosts don't match Mininet" )
2463 # CHECKING HOST ATTACHMENT POINTS
2464 hostAttachment = True
2465 zeroHosts = False
2466 # FIXME: topo-HA/obelisk specific mappings:
2467 # key is mac and value is dpid
2468 mappings = {}
2469 for i in range( 1, 29 ): # hosts 1 through 28
2470 # set up correct variables:
Jon Hallf37d44d2017-05-24 10:37:30 -07002471 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002472 if i == 1:
Jon Hallf37d44d2017-05-24 10:37:30 -07002473 deviceId = "1000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002474 elif i == 2:
Jon Hallf37d44d2017-05-24 10:37:30 -07002475 deviceId = "2000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002476 elif i == 3:
Jon Hallf37d44d2017-05-24 10:37:30 -07002477 deviceId = "3000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002478 elif i == 4:
Jon Hallf37d44d2017-05-24 10:37:30 -07002479 deviceId = "3004".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002480 elif i == 5:
Jon Hallf37d44d2017-05-24 10:37:30 -07002481 deviceId = "5000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002482 elif i == 6:
Jon Hallf37d44d2017-05-24 10:37:30 -07002483 deviceId = "6000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002484 elif i == 7:
Jon Hallf37d44d2017-05-24 10:37:30 -07002485 deviceId = "6007".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002486 elif i >= 8 and i <= 17:
2487 dpid = '3' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002488 deviceId = dpid.zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002489 elif i >= 18 and i <= 27:
2490 dpid = '6' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002491 deviceId = dpid.zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002492 elif i == 28:
Jon Hallf37d44d2017-05-24 10:37:30 -07002493 deviceId = "2800".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002494 mappings[ macId ] = deviceId
2495 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2496 if hosts[ controller ] == []:
2497 main.log.warn( "There are no hosts discovered" )
2498 zeroHosts = True
2499 else:
2500 for host in hosts[ controller ]:
2501 mac = None
2502 location = None
2503 device = None
2504 port = None
2505 try:
2506 mac = host.get( 'mac' )
2507 assert mac, "mac field could not be found for this host object"
2508
2509 location = host.get( 'location' )
2510 assert location, "location field could not be found for this host object"
2511
2512 # Trim the protocol identifier off deviceId
Jon Hallf37d44d2017-05-24 10:37:30 -07002513 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002514 assert device, "elementId field could not be found for this host location object"
2515
2516 port = location.get( 'port' )
2517 assert port, "port field could not be found for this host location object"
2518
2519 # Now check if this matches where they should be
2520 if mac and device and port:
2521 if str( port ) != "1":
2522 main.log.error( "The attachment port is incorrect for " +
2523 "host " + str( mac ) +
Jon Hallf37d44d2017-05-24 10:37:30 -07002524 ". Expected: 1 Actual: " + str( port ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002525 hostAttachment = False
2526 if device != mappings[ str( mac ) ]:
2527 main.log.error( "The attachment device is incorrect for " +
2528 "host " + str( mac ) +
2529 ". Expected: " + mappings[ str( mac ) ] +
2530 " Actual: " + device )
2531 hostAttachment = False
2532 else:
2533 hostAttachment = False
2534 except AssertionError:
2535 main.log.exception( "Json object not as expected" )
2536 main.log.error( repr( host ) )
2537 hostAttachment = False
2538 else:
2539 main.log.error( "No hosts json output or \"Error\"" +
2540 " in output. hosts = " +
2541 repr( hosts[ controller ] ) )
2542 if zeroHosts is False:
2543 # TODO: Find a way to know if there should be hosts in a
2544 # given point of the test
2545 hostAttachment = True
2546
2547 # END CHECKING HOST ATTACHMENT POINTS
2548 devicesResults = devicesResults and currentDevicesResult
2549 linksResults = linksResults and currentLinksResult
2550 hostsResults = hostsResults and currentHostsResult
2551 hostAttachmentResults = hostAttachmentResults and\
2552 hostAttachment
2553 topoResult = ( devicesResults and linksResults
2554 and hostsResults and ipResult and
2555 hostAttachmentResults )
2556 utilities.assert_equals( expect=True,
2557 actual=topoResult,
2558 onpass="ONOS topology matches Mininet",
2559 onfail=topoFailMsg )
2560 # End of While loop to pull ONOS state
2561
2562 # Compare json objects for hosts and dataplane clusters
2563
2564 # hosts
2565 main.step( "Hosts view is consistent across all ONOS nodes" )
2566 consistentHostsResult = main.TRUE
2567 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002568 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002569 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2570 if hosts[ controller ] == hosts[ 0 ]:
2571 continue
2572 else: # hosts not consistent
2573 main.log.error( "hosts from ONOS" + controllerStr +
2574 " is inconsistent with ONOS1" )
2575 main.log.warn( repr( hosts[ controller ] ) )
2576 consistentHostsResult = main.FALSE
2577
2578 else:
2579 main.log.error( "Error in getting ONOS hosts from ONOS" +
2580 controllerStr )
2581 consistentHostsResult = main.FALSE
2582 main.log.warn( "ONOS" + controllerStr +
2583 " hosts response: " +
2584 repr( hosts[ controller ] ) )
2585 utilities.assert_equals(
2586 expect=main.TRUE,
2587 actual=consistentHostsResult,
2588 onpass="Hosts view is consistent across all ONOS nodes",
2589 onfail="ONOS nodes have different views of hosts" )
2590
2591 main.step( "Hosts information is correct" )
2592 hostsResults = hostsResults and ipResult
2593 utilities.assert_equals(
2594 expect=main.TRUE,
2595 actual=hostsResults,
2596 onpass="Host information is correct",
2597 onfail="Host information is incorrect" )
2598
2599 main.step( "Host attachment points to the network" )
2600 utilities.assert_equals(
2601 expect=True,
2602 actual=hostAttachmentResults,
2603 onpass="Hosts are correctly attached to the network",
2604 onfail="ONOS did not correctly attach hosts to the network" )
2605
2606 # Strongly connected clusters of devices
2607 main.step( "Clusters view is consistent across all ONOS nodes" )
2608 consistentClustersResult = main.TRUE
2609 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002610 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002611 if "Error" not in clusters[ controller ]:
2612 if clusters[ controller ] == clusters[ 0 ]:
2613 continue
2614 else: # clusters not consistent
2615 main.log.error( "clusters from ONOS" +
2616 controllerStr +
2617 " is inconsistent with ONOS1" )
2618 consistentClustersResult = main.FALSE
2619 else:
2620 main.log.error( "Error in getting dataplane clusters " +
2621 "from ONOS" + controllerStr )
2622 consistentClustersResult = main.FALSE
2623 main.log.warn( "ONOS" + controllerStr +
2624 " clusters response: " +
2625 repr( clusters[ controller ] ) )
2626 utilities.assert_equals(
2627 expect=main.TRUE,
2628 actual=consistentClustersResult,
2629 onpass="Clusters view is consistent across all ONOS nodes",
2630 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002631 if not consistentClustersResult:
2632 main.log.debug( clusters )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002633
2634 main.step( "There is only one SCC" )
2635 # there should always only be one cluster
2636 try:
2637 numClusters = len( json.loads( clusters[ 0 ] ) )
2638 except ( ValueError, TypeError ):
2639 main.log.exception( "Error parsing clusters[0]: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07002640 repr( clusters[ 0 ] ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002641 numClusters = "ERROR"
2642 clusterResults = main.FALSE
2643 if numClusters == 1:
2644 clusterResults = main.TRUE
2645 utilities.assert_equals(
2646 expect=1,
2647 actual=numClusters,
2648 onpass="ONOS shows 1 SCC",
2649 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2650
2651 topoResult = ( devicesResults and linksResults
2652 and hostsResults and consistentHostsResult
2653 and consistentClustersResult and clusterResults
2654 and ipResult and hostAttachmentResults )
2655
2656 topoResult = topoResult and int( count <= 2 )
2657 note = "note it takes about " + str( int( cliTime ) ) + \
2658 " seconds for the test to make all the cli calls to fetch " +\
2659 "the topology from each ONOS instance"
2660 main.log.info(
2661 "Very crass estimate for topology discovery/convergence( " +
2662 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2663 str( count ) + " tries" )
2664
2665 main.step( "Device information is correct" )
2666 utilities.assert_equals(
2667 expect=main.TRUE,
2668 actual=devicesResults,
2669 onpass="Device information is correct",
2670 onfail="Device information is incorrect" )
2671
2672 main.step( "Links are correct" )
2673 utilities.assert_equals(
2674 expect=main.TRUE,
2675 actual=linksResults,
2676 onpass="Link are correct",
2677 onfail="Links are incorrect" )
2678
2679 main.step( "Hosts are correct" )
2680 utilities.assert_equals(
2681 expect=main.TRUE,
2682 actual=hostsResults,
2683 onpass="Hosts are correct",
2684 onfail="Hosts are incorrect" )
2685
2686 # FIXME: move this to an ONOS state case
2687 main.step( "Checking ONOS nodes" )
2688 nodeResults = utilities.retry( main.HA.nodesCheck,
2689 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07002690 args=[ main.activeNodes ],
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002691 attempts=5 )
2692 utilities.assert_equals( expect=True, actual=nodeResults,
2693 onpass="Nodes check successful",
2694 onfail="Nodes check NOT successful" )
2695 if not nodeResults:
2696 for i in main.activeNodes:
2697 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallf37d44d2017-05-24 10:37:30 -07002698 main.CLIs[ i ].name,
2699 main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002700
Jon Halld2871c22016-07-26 11:01:14 -07002701 if not topoResult:
2702 main.cleanup()
2703 main.exit()
2704
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002705 def CASE9( self, main ):
2706 """
2707 Link s3-s28 down
2708 """
2709 import time
2710 assert main.numCtrls, "main.numCtrls not defined"
2711 assert main, "main not defined"
2712 assert utilities.assert_equals, "utilities.assert_equals not defined"
2713 assert main.CLIs, "main.CLIs not defined"
2714 assert main.nodes, "main.nodes not defined"
2715 # NOTE: You should probably run a topology check after this
2716
2717 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2718
2719 description = "Turn off a link to ensure that Link Discovery " +\
2720 "is working properly"
2721 main.case( description )
2722
2723 main.step( "Kill Link between s3 and s28" )
2724 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2725 main.log.info( "Waiting " + str( linkSleep ) +
2726 " seconds for link down to be discovered" )
2727 time.sleep( linkSleep )
2728 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2729 onpass="Link down successful",
2730 onfail="Failed to bring link down" )
2731 # TODO do some sort of check here
2732
2733 def CASE10( self, main ):
2734 """
2735 Link s3-s28 up
2736 """
2737 import time
2738 assert main.numCtrls, "main.numCtrls not defined"
2739 assert main, "main not defined"
2740 assert utilities.assert_equals, "utilities.assert_equals not defined"
2741 assert main.CLIs, "main.CLIs not defined"
2742 assert main.nodes, "main.nodes not defined"
2743 # NOTE: You should probably run a topology check after this
2744
2745 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2746
2747 description = "Restore a link to ensure that Link Discovery is " + \
2748 "working properly"
2749 main.case( description )
2750
2751 main.step( "Bring link between s3 and s28 back up" )
2752 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2753 main.log.info( "Waiting " + str( linkSleep ) +
2754 " seconds for link up to be discovered" )
2755 time.sleep( linkSleep )
2756 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2757 onpass="Link up successful",
2758 onfail="Failed to bring link up" )
2759 # TODO do some sort of check here
2760
2761 def CASE11( self, main ):
2762 """
2763 Switch Down
2764 """
2765 # NOTE: You should probably run a topology check after this
2766 import time
2767 assert main.numCtrls, "main.numCtrls not defined"
2768 assert main, "main not defined"
2769 assert utilities.assert_equals, "utilities.assert_equals not defined"
2770 assert main.CLIs, "main.CLIs not defined"
2771 assert main.nodes, "main.nodes not defined"
2772
2773 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2774
2775 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallf37d44d2017-05-24 10:37:30 -07002776 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002777 main.case( description )
2778 switch = main.params[ 'kill' ][ 'switch' ]
2779 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2780
2781 # TODO: Make this switch parameterizable
2782 main.step( "Kill " + switch )
2783 main.log.info( "Deleting " + switch )
2784 main.Mininet1.delSwitch( switch )
2785 main.log.info( "Waiting " + str( switchSleep ) +
2786 " seconds for switch down to be discovered" )
2787 time.sleep( switchSleep )
2788 device = onosCli.getDevice( dpid=switchDPID )
2789 # Peek at the deleted switch
2790 main.log.warn( str( device ) )
2791 result = main.FALSE
2792 if device and device[ 'available' ] is False:
2793 result = main.TRUE
2794 utilities.assert_equals( expect=main.TRUE, actual=result,
2795 onpass="Kill switch successful",
2796 onfail="Failed to kill switch?" )
2797
2798 def CASE12( self, main ):
2799 """
2800 Switch Up
2801 """
2802 # NOTE: You should probably run a topology check after this
2803 import time
2804 assert main.numCtrls, "main.numCtrls not defined"
2805 assert main, "main not defined"
2806 assert utilities.assert_equals, "utilities.assert_equals not defined"
2807 assert main.CLIs, "main.CLIs not defined"
2808 assert main.nodes, "main.nodes not defined"
2809
2810 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2811 switch = main.params[ 'kill' ][ 'switch' ]
2812 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2813 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallf37d44d2017-05-24 10:37:30 -07002814 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002815 description = "Adding a switch to ensure it is discovered correctly"
2816 main.case( description )
2817
2818 main.step( "Add back " + switch )
2819 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2820 for peer in links:
2821 main.Mininet1.addLink( switch, peer )
2822 ipList = [ node.ip_address for node in main.nodes ]
2823 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2824 main.log.info( "Waiting " + str( switchSleep ) +
2825 " seconds for switch up to be discovered" )
2826 time.sleep( switchSleep )
2827 device = onosCli.getDevice( dpid=switchDPID )
2828 # Peek at the deleted switch
2829 main.log.warn( str( device ) )
2830 result = main.FALSE
2831 if device and device[ 'available' ]:
2832 result = main.TRUE
2833 utilities.assert_equals( expect=main.TRUE, actual=result,
2834 onpass="add switch successful",
2835 onfail="Failed to add switch?" )
2836
2837 def CASE13( self, main ):
2838 """
2839 Clean up
2840 """
2841 assert main.numCtrls, "main.numCtrls not defined"
2842 assert main, "main not defined"
2843 assert utilities.assert_equals, "utilities.assert_equals not defined"
2844 assert main.CLIs, "main.CLIs not defined"
2845 assert main.nodes, "main.nodes not defined"
2846
2847 main.case( "Test Cleanup" )
2848 main.step( "Killing tcpdumps" )
2849 main.Mininet2.stopTcpdump()
2850
2851 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2852 main.step( "Copying MN pcap and ONOS log files to test station" )
2853 # NOTE: MN Pcap file is being saved to logdir.
2854 # We scp this file as MN and TestON aren't necessarily the same vm
2855
2856 # FIXME: To be replaced with a Jenkin's post script
2857 # TODO: Load these from params
2858 # NOTE: must end in /
2859 logFolder = "/opt/onos/log/"
2860 logFiles = [ "karaf.log", "karaf.log.1" ]
2861 # NOTE: must end in /
2862 for f in logFiles:
2863 for node in main.nodes:
2864 dstName = main.logdir + "/" + node.name + "-" + f
2865 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2866 logFolder + f, dstName )
2867 # std*.log's
2868 # NOTE: must end in /
2869 logFolder = "/opt/onos/var/"
2870 logFiles = [ "stderr.log", "stdout.log" ]
2871 # NOTE: must end in /
2872 for f in logFiles:
2873 for node in main.nodes:
2874 dstName = main.logdir + "/" + node.name + "-" + f
2875 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2876 logFolder + f, dstName )
2877 else:
2878 main.log.debug( "skipping saving log files" )
2879
2880 main.step( "Stopping Mininet" )
2881 mnResult = main.Mininet1.stopNet()
2882 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2883 onpass="Mininet stopped",
2884 onfail="MN cleanup NOT successful" )
2885
2886 main.step( "Checking ONOS Logs for errors" )
2887 for node in main.nodes:
2888 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2889 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2890
2891 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07002892 timerLog = open( main.logdir + "/Timers.csv", 'w' )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002893 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2894 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2895 timerLog.close()
Jon Hallf37d44d2017-05-24 10:37:30 -07002896 except NameError as e:
2897 main.log.exception( e )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002898
2899 main.step( "Stopping webserver" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002900 status = main.Server.stop()
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002901 utilities.assert_equals( expect=main.TRUE, actual=status,
2902 onpass="Stop Server",
2903 onfail="Failled to stop SimpleHTTPServer" )
2904 del main.Server
2905
2906 def CASE14( self, main ):
2907 """
2908 start election app on all onos nodes
2909 """
2910 import time
2911 assert main.numCtrls, "main.numCtrls not defined"
2912 assert main, "main not defined"
2913 assert utilities.assert_equals, "utilities.assert_equals not defined"
2914 assert main.CLIs, "main.CLIs not defined"
2915 assert main.nodes, "main.nodes not defined"
2916
Jon Hallf37d44d2017-05-24 10:37:30 -07002917 main.case( "Start Leadership Election app" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002918 main.step( "Install leadership election app" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002919 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002920 appResult = onosCli.activateApp( "org.onosproject.election" )
2921 utilities.assert_equals(
2922 expect=main.TRUE,
2923 actual=appResult,
2924 onpass="Election app installed",
2925 onfail="Something went wrong with installing Leadership election" )
2926
2927 main.step( "Run for election on each node" )
2928 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002929 main.CLIs[ i ].electionTestRun()
2930 time.sleep( 5 )
2931 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002932 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2933 utilities.assert_equals(
2934 expect=True,
2935 actual=sameResult,
2936 onpass="All nodes see the same leaderboards",
2937 onfail="Inconsistent leaderboards" )
2938
2939 if sameResult:
2940 leader = leaders[ 0 ][ 0 ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002941 if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002942 correctLeader = True
2943 else:
2944 correctLeader = False
2945 main.step( "First node was elected leader" )
2946 utilities.assert_equals(
2947 expect=True,
2948 actual=correctLeader,
2949 onpass="Correct leader was elected",
2950 onfail="Incorrect leader" )
2951
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Cannot meaningfully test leadership without the election app
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All leaderboards agree: first entry is the cluster-wide leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means the leader IP matched no node
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                # 'none' is only acceptable in a single-node cluster where
                # the sole candidate just withdrew
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries in the old board: can't name a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: Parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3125
3126 def CASE16( self, main ):
3127 """
3128 Install Distributed Primitives app
3129 """
3130 import time
3131 assert main.numCtrls, "main.numCtrls not defined"
3132 assert main, "main not defined"
3133 assert utilities.assert_equals, "utilities.assert_equals not defined"
3134 assert main.CLIs, "main.CLIs not defined"
3135 assert main.nodes, "main.nodes not defined"
3136
3137 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003138 main.pCounterName = "TestON-Partitions"
3139 main.pCounterValue = 0
Jon Hallf37d44d2017-05-24 10:37:30 -07003140 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003141 main.onosSetName = "TestON-set"
Jon Hall9ebd1bd2016-04-19 01:37:17 -07003142
3143 description = "Install Primitives app"
3144 main.case( description )
3145 main.step( "Install Primitives app" )
3146 appName = "org.onosproject.distributedprimitives"
Jon Hallf37d44d2017-05-24 10:37:30 -07003147 node = main.activeNodes[ 0 ]
3148 appResults = main.CLIs[ node ].activateApp( appName )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07003149 utilities.assert_equals( expect=main.TRUE,
3150 actual=appResults,
3151 onpass="Primitives app activated",
3152 onfail="Primitives app not activated" )
3153 time.sleep( 5 ) # To allow all nodes to activate
3154
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives
        """
        # Delegates to the shared HA helper implementation of this case
        main.HA.CASE17( main )