blob: 0ac38a2a0109d9d1db99b2ea49cc9f2b896ec015 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    dynamic scaling of the cluster size.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The scaling case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
Jon Hall9ebd1bd2016-04-19 01:37:17 -070025class HAscaling:
26
27 def __init__( self ):
28 self.default = ''
29
30 def CASE1( self, main ):
31 """
32 CASE1 is to compile ONOS and push it to the test machines
33
34 Startup sequence:
35 cell <name>
36 onos-verify-cell
37 NOTE: temporary - onos-remove-raft-logs
38 onos-uninstall
39 start mininet
40 git pull
41 mvn clean install
42 onos-package
43 onos-install -f
44 onos-wait-for-start
45 start cli sessions
46 start tcpdump
47 """
48 import time
49 import os
50 import re
51 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
52 "initialization" )
53 main.case( "Setting up test environment" )
54 main.caseExplanation = "Setup the test environment including " +\
55 "installing ONOS, starting Mininet and ONOS" +\
56 "cli sessions."
57
58 # load some variables from the params file
59 PULLCODE = False
60 if main.params[ 'Git' ] == 'True':
61 PULLCODE = True
62 gitBranch = main.params[ 'branch' ]
63 cellName = main.params[ 'ENV' ][ 'cellName' ]
64
65 main.numCtrls = int( main.params[ 'num_controllers' ] )
66 if main.ONOSbench.maxNodes:
67 if main.ONOSbench.maxNodes < main.numCtrls:
68 main.numCtrls = int( main.ONOSbench.maxNodes )
69 # set global variables
70 # These are for csv plotting in jenkins
71 global labels
72 global data
73 labels = []
74 data = []
75
76 try:
77 from tests.HA.dependencies.HA import HA
78 main.HA = HA()
79 from tests.HA.HAscaling.dependencies.Server import Server
80 main.Server = Server()
81 except Exception as e:
82 main.log.exception( e )
83 main.cleanup()
84 main.exit()
85
86 main.CLIs = []
87 main.nodes = []
88 ipList = []
89 for i in range( 1, main.numCtrls + 1 ):
90 try:
91 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
92 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
93 ipList.append( main.nodes[ -1 ].ip_address )
94 except AttributeError:
95 break
96
97 main.step( "Create cell file" )
98 cellAppString = main.params[ 'ENV' ][ 'appString' ]
99 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
100 main.Mininet1.ip_address,
Devin Limdc78e202017-06-09 18:30:07 -0700101 cellAppString, ipList, main.ONOScli1.karafUser )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700102
103 main.step( "Applying cell variable to environment" )
104 cellResult = main.ONOSbench.setCell( cellName )
105 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
106 onpass="Set cell successfull",
107 onfail="Failled to set cell" )
108
109 main.step( "Verify connectivity to cell" )
110 verifyResult = main.ONOSbench.verifyCell()
111 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
112 onpass="Verify cell passed",
113 onfail="Failled to verify cell" )
114
115 # FIXME:this is short term fix
116 main.log.info( "Removing raft logs" )
117 main.ONOSbench.onosRemoveRaftLogs()
118
119 main.log.info( "Uninstalling ONOS" )
120 for node in main.nodes:
121 main.ONOSbench.onosUninstall( node.ip_address )
122
123 # Make sure ONOS is DEAD
124 main.log.info( "Killing any ONOS processes" )
125 killResults = main.TRUE
126 for node in main.nodes:
127 killed = main.ONOSbench.onosKill( node.ip_address )
128 killResults = killResults and killed
129
130 main.step( "Setup server for cluster metadata file" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700131 port = main.params[ 'server' ][ 'port' ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700132 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
133 main.log.debug( "Root dir: {}".format( rootDir ) )
134 status = main.Server.start( main.ONOSbench,
135 rootDir,
136 port=port,
137 logDir=main.logdir + "/server.log" )
138 utilities.assert_equals( expect=main.TRUE, actual=status,
139 onpass="Server started",
140 onfail="Failled to start SimpleHTTPServer" )
141
142 main.step( "Generate initial metadata file" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700143 main.scaling = main.params[ 'scaling' ].split( "," )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700144 main.log.debug( main.scaling )
Jon Hallf37d44d2017-05-24 10:37:30 -0700145 scale = main.scaling.pop( 0 )
146 main.log.debug( scale )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700147 if "e" in scale:
148 equal = True
149 else:
150 equal = False
Jon Hallf37d44d2017-05-24 10:37:30 -0700151 main.log.debug( equal )
152 main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700153 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
154 utilities.assert_equals( expect=main.TRUE, actual=genResult,
155 onpass="New cluster metadata file generated",
156 onfail="Failled to generate new metadata file" )
157
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700158 gitPullResult = main.TRUE
159
160 main.step( "Starting Mininet" )
161 # scp topo file to mininet
162 # TODO: move to params?
163 topoName = "obelisk.py"
164 filePath = main.ONOSbench.home + "/tools/test/topos/"
165 main.ONOSbench.scp( main.Mininet1,
166 filePath + topoName,
167 main.Mininet1.home,
168 direction="to" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700169 mnResult = main.Mininet1.startNet()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700170 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
171 onpass="Mininet Started",
172 onfail="Error starting Mininet" )
173
174 main.step( "Git checkout and pull " + gitBranch )
175 if PULLCODE:
176 main.ONOSbench.gitCheckout( gitBranch )
177 gitPullResult = main.ONOSbench.gitPull()
178 # values of 1 or 3 are good
179 utilities.assert_lesser( expect=0, actual=gitPullResult,
180 onpass="Git pull successful",
181 onfail="Git pull failed" )
182 main.ONOSbench.getVersion( report=True )
183
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700184 # GRAPHS
185 # NOTE: important params here:
186 # job = name of Jenkins job
187 # Plot Name = Plot-HA, only can be used if multiple plots
188 # index = The number of the graph under plot name
189 job = "HAscaling"
190 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700191 index = "1"
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700192 graphs = '<ac:structured-macro ac:name="html">\n'
193 graphs += '<ac:plain-text-body><![CDATA[\n'
194 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
195 '/plot/' + plotName + '/getPlot?index=' + index +\
196 '&width=500&height=300"' +\
197 'noborder="0" width="500" height="300" scrolling="yes" ' +\
198 'seamless="seamless"></iframe>\n'
199 graphs += ']]></ac:plain-text-body>\n'
200 graphs += '</ac:structured-macro>\n'
Jon Hallf37d44d2017-05-24 10:37:30 -0700201 main.log.wiki( graphs )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700202
203 main.step( "Copying backup config files" )
204 path = "~/onos/tools/package/bin/onos-service"
205 cp = main.ONOSbench.scp( main.ONOSbench,
206 path,
207 path + ".backup",
208 direction="to" )
209
210 utilities.assert_equals( expect=main.TRUE,
211 actual=cp,
212 onpass="Copy backup config file succeeded",
213 onfail="Copy backup config file failed" )
214 # we need to modify the onos-service file to use remote metadata file
215 # url for cluster metadata file
Jon Hallf37d44d2017-05-24 10:37:30 -0700216 iface = main.params[ 'server' ].get( 'interface' )
Jon Hall8f6d4622016-05-23 15:27:18 -0700217 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700218 metaFile = "cluster.json"
219 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
220 main.log.warn( javaArgs )
221 main.log.warn( repr( javaArgs ) )
222 handle = main.ONOSbench.handle
223 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
224 main.log.warn( sed )
225 main.log.warn( repr( sed ) )
226 handle.sendline( sed )
Jon Hallbd60ea02016-08-23 10:03:59 -0700227 handle.expect( metaFile )
228 output = handle.before
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700229 handle.expect( "\$" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700230 output += handle.before
231 main.log.debug( repr( output ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700232
233 main.step( "Creating ONOS package" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700234 packageResult = main.ONOSbench.buckBuild()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700235 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
236 onpass="ONOS package successful",
237 onfail="ONOS package failed" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700238 if not packageResult:
239 main.cleanup()
240 main.exit()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700241
242 main.step( "Installing ONOS package" )
243 onosInstallResult = main.TRUE
244 for i in range( main.ONOSbench.maxNodes ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700245 node = main.nodes[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700246 options = "-f"
247 if i >= main.numCtrls:
248 options = "-nf" # Don't start more than the current scale
249 tmpResult = main.ONOSbench.onosInstall( options=options,
250 node=node.ip_address )
251 onosInstallResult = onosInstallResult and tmpResult
252 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
253 onpass="ONOS install successful",
254 onfail="ONOS install failed" )
255
256 # Cleanup custom onos-service file
257 main.ONOSbench.scp( main.ONOSbench,
258 path + ".backup",
259 path,
260 direction="to" )
261
You Wangf5de25b2017-01-06 15:13:01 -0800262 main.step( "Set up ONOS secure SSH" )
263 secureSshResult = main.TRUE
Jon Hall168c1862017-01-31 17:35:34 -0800264 for i in range( main.numCtrls ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700265 node = main.nodes[ i ]
You Wangf5de25b2017-01-06 15:13:01 -0800266 secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
267 utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
268 onpass="Test step PASS",
269 onfail="Test step FAIL" )
270
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700271 main.step( "Checking if ONOS is up yet" )
272 for i in range( 2 ):
273 onosIsupResult = main.TRUE
274 for i in range( main.numCtrls ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700275 node = main.nodes[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700276 started = main.ONOSbench.isup( node.ip_address )
277 if not started:
278 main.log.error( node.name + " hasn't started" )
279 onosIsupResult = onosIsupResult and started
280 if onosIsupResult == main.TRUE:
281 break
282 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
283 onpass="ONOS startup successful",
284 onfail="ONOS startup failed" )
285
Jon Hall6509dbf2016-06-21 17:01:17 -0700286 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700287 cliResults = main.TRUE
288 threads = []
289 for i in range( main.numCtrls ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700290 t = main.Thread( target=main.CLIs[ i ].startOnosCli,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700291 name="startOnosCli-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -0700292 args=[ main.nodes[ i ].ip_address ] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700293 threads.append( t )
294 t.start()
295
296 for t in threads:
297 t.join()
298 cliResults = cliResults and t.result
299 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
300 onpass="ONOS cli startup successful",
301 onfail="ONOS cli startup failed" )
302
303 # Create a list of active nodes for use when some nodes are stopped
304 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
305
306 if main.params[ 'tcpdump' ].lower() == "true":
307 main.step( "Start Packet Capture MN" )
308 main.Mininet2.startTcpdump(
309 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
310 + "-MN.pcap",
311 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
312 port=main.params[ 'MNtcpdump' ][ 'port' ] )
313
314 main.step( "Checking ONOS nodes" )
315 nodeResults = utilities.retry( main.HA.nodesCheck,
316 False,
Jon Hallf37d44d2017-05-24 10:37:30 -0700317 args=[ main.activeNodes ],
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700318 attempts=5 )
319 utilities.assert_equals( expect=True, actual=nodeResults,
320 onpass="Nodes check successful",
321 onfail="Nodes check NOT successful" )
322
323 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700324 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700325 cli = main.CLIs[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700326 main.log.debug( "{} components not ACTIVE: \n{}".format(
327 cli.name,
328 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700329 main.log.error( "Failed to start ONOS, stopping test" )
330 main.cleanup()
331 main.exit()
332
333 main.step( "Activate apps defined in the params file" )
334 # get data from the params
335 apps = main.params.get( 'apps' )
336 if apps:
Jon Hallf37d44d2017-05-24 10:37:30 -0700337 apps = apps.split( ',' )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700338 main.log.warn( apps )
339 activateResult = True
340 for app in apps:
341 main.CLIs[ 0 ].app( app, "Activate" )
342 # TODO: check this worked
343 time.sleep( 10 ) # wait for apps to activate
344 for app in apps:
345 state = main.CLIs[ 0 ].appStatus( app )
346 if state == "ACTIVE":
347 activateResult = activateResult and True
348 else:
349 main.log.error( "{} is in {} state".format( app, state ) )
350 activateResult = False
351 utilities.assert_equals( expect=True,
352 actual=activateResult,
353 onpass="Successfully activated apps",
354 onfail="Failed to activate apps" )
355 else:
356 main.log.warn( "No apps were specified to be loaded after startup" )
357
358 main.step( "Set ONOS configurations" )
359 config = main.params.get( 'ONOS_Configuration' )
360 if config:
361 main.log.debug( config )
362 checkResult = main.TRUE
363 for component in config:
Jon Hallf37d44d2017-05-24 10:37:30 -0700364 for setting in config[ component ]:
365 value = config[ component ][ setting ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700366 check = main.CLIs[ 0 ].setCfg( component, setting, value )
367 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
368 checkResult = check and checkResult
369 utilities.assert_equals( expect=main.TRUE,
370 actual=checkResult,
371 onpass="Successfully set config",
372 onfail="Failed to set config" )
373 else:
374 main.log.warn( "No configurations were specified to be changed after startup" )
375
376 main.step( "App Ids check" )
377 appCheck = main.TRUE
378 threads = []
379 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700380 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700381 name="appToIDCheck-" + str( i ),
382 args=[] )
383 threads.append( t )
384 t.start()
385
386 for t in threads:
387 t.join()
388 appCheck = appCheck and t.result
389 if appCheck != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700390 node = main.activeNodes[ 0 ]
391 main.log.warn( main.CLIs[ node ].apps() )
392 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700393 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
394 onpass="App Ids seem to be correct",
395 onfail="Something is wrong with app Ids" )
396
397 def CASE2( self, main ):
398 """
399 Assign devices to controllers
400 """
401 import re
402 assert main.numCtrls, "main.numCtrls not defined"
403 assert main, "main not defined"
404 assert utilities.assert_equals, "utilities.assert_equals not defined"
405 assert main.CLIs, "main.CLIs not defined"
406 assert main.nodes, "main.nodes not defined"
407
408 main.case( "Assigning devices to controllers" )
409 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
410 "and check that an ONOS node becomes the " +\
411 "master of the device."
412 main.step( "Assign switches to controllers" )
413
414 ipList = []
415 for i in range( main.ONOSbench.maxNodes ):
416 ipList.append( main.nodes[ i ].ip_address )
417 swList = []
418 for i in range( 1, 29 ):
419 swList.append( "s" + str( i ) )
420 main.Mininet1.assignSwController( sw=swList, ip=ipList )
421
422 mastershipCheck = main.TRUE
423 for i in range( 1, 29 ):
424 response = main.Mininet1.getSwController( "s" + str( i ) )
425 try:
426 main.log.info( str( response ) )
427 except Exception:
428 main.log.info( repr( response ) )
429 for node in main.nodes:
430 if re.search( "tcp:" + node.ip_address, response ):
431 mastershipCheck = mastershipCheck and main.TRUE
432 else:
433 main.log.error( "Error, node " + node.ip_address + " is " +
434 "not in the list of controllers s" +
435 str( i ) + " is connecting to." )
436 mastershipCheck = main.FALSE
437 utilities.assert_equals(
438 expect=main.TRUE,
439 actual=mastershipCheck,
440 onpass="Switch mastership assigned correctly",
441 onfail="Switches not assigned correctly to controllers" )
442
443 def CASE21( self, main ):
444 """
445 Assign mastership to controllers
446 """
447 import time
448 assert main.numCtrls, "main.numCtrls not defined"
449 assert main, "main not defined"
450 assert utilities.assert_equals, "utilities.assert_equals not defined"
451 assert main.CLIs, "main.CLIs not defined"
452 assert main.nodes, "main.nodes not defined"
453
454 main.case( "Assigning Controller roles for switches" )
455 main.caseExplanation = "Check that ONOS is connected to each " +\
456 "device. Then manually assign" +\
457 " mastership to specific ONOS nodes using" +\
458 " 'device-role'"
459 main.step( "Assign mastership of switches to specific controllers" )
460 # Manually assign mastership to the controller we want
461 roleCall = main.TRUE
462
Jon Hallf37d44d2017-05-24 10:37:30 -0700463 ipList = []
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700464 deviceList = []
Jon Hallf37d44d2017-05-24 10:37:30 -0700465 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700466 try:
467 # Assign mastership to specific controllers. This assignment was
468 # determined for a 7 node cluser, but will work with any sized
469 # cluster
470 for i in range( 1, 29 ): # switches 1 through 28
471 # set up correct variables:
472 if i == 1:
473 c = 0
474 ip = main.nodes[ c ].ip_address # ONOS1
475 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
476 elif i == 2:
477 c = 1 % main.numCtrls
478 ip = main.nodes[ c ].ip_address # ONOS2
479 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
480 elif i == 3:
481 c = 1 % main.numCtrls
482 ip = main.nodes[ c ].ip_address # ONOS2
483 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
484 elif i == 4:
485 c = 3 % main.numCtrls
486 ip = main.nodes[ c ].ip_address # ONOS4
487 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
488 elif i == 5:
489 c = 2 % main.numCtrls
490 ip = main.nodes[ c ].ip_address # ONOS3
491 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
492 elif i == 6:
493 c = 2 % main.numCtrls
494 ip = main.nodes[ c ].ip_address # ONOS3
495 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
496 elif i == 7:
497 c = 5 % main.numCtrls
498 ip = main.nodes[ c ].ip_address # ONOS6
499 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
500 elif i >= 8 and i <= 17:
501 c = 4 % main.numCtrls
502 ip = main.nodes[ c ].ip_address # ONOS5
503 dpid = '3' + str( i ).zfill( 3 )
504 deviceId = onosCli.getDevice( dpid ).get( 'id' )
505 elif i >= 18 and i <= 27:
506 c = 6 % main.numCtrls
507 ip = main.nodes[ c ].ip_address # ONOS7
508 dpid = '6' + str( i ).zfill( 3 )
509 deviceId = onosCli.getDevice( dpid ).get( 'id' )
510 elif i == 28:
511 c = 0
512 ip = main.nodes[ c ].ip_address # ONOS1
513 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
514 else:
515 main.log.error( "You didn't write an else statement for " +
516 "switch s" + str( i ) )
517 roleCall = main.FALSE
518 # Assign switch
519 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
520 # TODO: make this controller dynamic
521 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
522 ipList.append( ip )
523 deviceList.append( deviceId )
524 except ( AttributeError, AssertionError ):
525 main.log.exception( "Something is wrong with ONOS device view" )
526 main.log.info( onosCli.devices() )
527 utilities.assert_equals(
528 expect=main.TRUE,
529 actual=roleCall,
530 onpass="Re-assigned switch mastership to designated controller",
531 onfail="Something wrong with deviceRole calls" )
532
533 main.step( "Check mastership was correctly assigned" )
534 roleCheck = main.TRUE
535 # NOTE: This is due to the fact that device mastership change is not
536 # atomic and is actually a multi step process
537 time.sleep( 5 )
538 for i in range( len( ipList ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700539 ip = ipList[ i ]
540 deviceId = deviceList[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700541 # Check assignment
542 master = onosCli.getRole( deviceId ).get( 'master' )
543 if ip in master:
544 roleCheck = roleCheck and main.TRUE
545 else:
546 roleCheck = roleCheck and main.FALSE
547 main.log.error( "Error, controller " + ip + " is not" +
548 " master " + "of device " +
549 str( deviceId ) + ". Master is " +
550 repr( master ) + "." )
551 utilities.assert_equals(
552 expect=main.TRUE,
553 actual=roleCheck,
554 onpass="Switches were successfully reassigned to designated " +
555 "controller",
556 onfail="Switches were not successfully reassigned" )
557
558 def CASE3( self, main ):
559 """
560 Assign intents
561 """
562 import time
563 import json
564 assert main.numCtrls, "main.numCtrls not defined"
565 assert main, "main not defined"
566 assert utilities.assert_equals, "utilities.assert_equals not defined"
567 assert main.CLIs, "main.CLIs not defined"
568 assert main.nodes, "main.nodes not defined"
569 try:
570 labels
571 except NameError:
572 main.log.error( "labels not defined, setting to []" )
573 labels = []
574 try:
575 data
576 except NameError:
577 main.log.error( "data not defined, setting to []" )
578 data = []
579 # NOTE: we must reinstall intents until we have a persistant intent
580 # datastore!
581 main.case( "Adding host Intents" )
582 main.caseExplanation = "Discover hosts by using pingall then " +\
583 "assign predetermined host-to-host intents." +\
584 " After installation, check that the intent" +\
585 " is distributed to all nodes and the state" +\
586 " is INSTALLED"
587
588 # install onos-app-fwd
589 main.step( "Install reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700590 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700591 installResults = onosCli.activateApp( "org.onosproject.fwd" )
592 utilities.assert_equals( expect=main.TRUE, actual=installResults,
593 onpass="Install fwd successful",
594 onfail="Install fwd failed" )
595
596 main.step( "Check app ids" )
597 appCheck = main.TRUE
598 threads = []
599 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700600 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700601 name="appToIDCheck-" + str( i ),
602 args=[] )
603 threads.append( t )
604 t.start()
605
606 for t in threads:
607 t.join()
608 appCheck = appCheck and t.result
609 if appCheck != main.TRUE:
610 main.log.warn( onosCli.apps() )
611 main.log.warn( onosCli.appIDs() )
612 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
613 onpass="App Ids seem to be correct",
614 onfail="Something is wrong with app Ids" )
615
616 main.step( "Discovering Hosts( Via pingall for now )" )
617 # FIXME: Once we have a host discovery mechanism, use that instead
618 # REACTIVE FWD test
619 pingResult = main.FALSE
620 passMsg = "Reactive Pingall test passed"
621 time1 = time.time()
622 pingResult = main.Mininet1.pingall()
623 time2 = time.time()
624 if not pingResult:
Jon Hallf37d44d2017-05-24 10:37:30 -0700625 main.log.warn( "First pingall failed. Trying again..." )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700626 pingResult = main.Mininet1.pingall()
627 passMsg += " on the second try"
628 utilities.assert_equals(
629 expect=main.TRUE,
630 actual=pingResult,
Jon Hallf37d44d2017-05-24 10:37:30 -0700631 onpass=passMsg,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700632 onfail="Reactive Pingall failed, " +
633 "one or more ping pairs failed" )
634 main.log.info( "Time for pingall: %2f seconds" %
635 ( time2 - time1 ) )
636 # timeout for fwd flows
637 time.sleep( 11 )
638 # uninstall onos-app-fwd
639 main.step( "Uninstall reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700640 node = main.activeNodes[ 0 ]
641 uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700642 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
643 onpass="Uninstall fwd successful",
644 onfail="Uninstall fwd failed" )
645
646 main.step( "Check app ids" )
647 threads = []
648 appCheck2 = main.TRUE
649 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700650 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700651 name="appToIDCheck-" + str( i ),
652 args=[] )
653 threads.append( t )
654 t.start()
655
656 for t in threads:
657 t.join()
658 appCheck2 = appCheck2 and t.result
659 if appCheck2 != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700660 node = main.activeNodes[ 0 ]
661 main.log.warn( main.CLIs[ node ].apps() )
662 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700663 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
664 onpass="App Ids seem to be correct",
665 onfail="Something is wrong with app Ids" )
666
667 main.step( "Add host intents via cli" )
668 intentIds = []
669 # TODO: move the host numbers to params
670 # Maybe look at all the paths we ping?
671 intentAddResult = True
672 hostResult = main.TRUE
673 for i in range( 8, 18 ):
674 main.log.info( "Adding host intent between h" + str( i ) +
675 " and h" + str( i + 10 ) )
676 host1 = "00:00:00:00:00:" + \
677 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
678 host2 = "00:00:00:00:00:" + \
679 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
680 # NOTE: getHost can return None
681 host1Dict = onosCli.getHost( host1 )
682 host2Dict = onosCli.getHost( host2 )
683 host1Id = None
684 host2Id = None
685 if host1Dict and host2Dict:
686 host1Id = host1Dict.get( 'id', None )
687 host2Id = host2Dict.get( 'id', None )
688 if host1Id and host2Id:
689 nodeNum = ( i % len( main.activeNodes ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700690 node = main.activeNodes[ nodeNum ]
691 tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700692 if tmpId:
693 main.log.info( "Added intent with id: " + tmpId )
694 intentIds.append( tmpId )
695 else:
696 main.log.error( "addHostIntent returned: " +
697 repr( tmpId ) )
698 else:
699 main.log.error( "Error, getHost() failed for h" + str( i ) +
700 " and/or h" + str( i + 10 ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700701 node = main.activeNodes[ 0 ]
702 hosts = main.CLIs[ node ].hosts()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700703 main.log.warn( "Hosts output: " )
704 try:
705 main.log.warn( json.dumps( json.loads( hosts ),
706 sort_keys=True,
707 indent=4,
708 separators=( ',', ': ' ) ) )
709 except ( ValueError, TypeError ):
710 main.log.warn( repr( hosts ) )
711 hostResult = main.FALSE
712 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
713 onpass="Found a host id for each host",
714 onfail="Error looking up host ids" )
715
716 intentStart = time.time()
717 onosIds = onosCli.getAllIntentsId()
718 main.log.info( "Submitted intents: " + str( intentIds ) )
719 main.log.info( "Intents in ONOS: " + str( onosIds ) )
720 for intent in intentIds:
721 if intent in onosIds:
722 pass # intent submitted is in onos
723 else:
724 intentAddResult = False
725 if intentAddResult:
726 intentStop = time.time()
727 else:
728 intentStop = None
729 # Print the intent states
730 intents = onosCli.intents()
731 intentStates = []
732 installedCheck = True
733 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
734 count = 0
735 try:
736 for intent in json.loads( intents ):
737 state = intent.get( 'state', None )
738 if "INSTALLED" not in state:
739 installedCheck = False
740 intentId = intent.get( 'id', None )
741 intentStates.append( ( intentId, state ) )
742 except ( ValueError, TypeError ):
743 main.log.exception( "Error parsing intents" )
744 # add submitted intents not in the store
745 tmplist = [ i for i, s in intentStates ]
746 missingIntents = False
747 for i in intentIds:
748 if i not in tmplist:
749 intentStates.append( ( i, " - " ) )
750 missingIntents = True
751 intentStates.sort()
752 for i, s in intentStates:
753 count += 1
754 main.log.info( "%-6s%-15s%-15s" %
755 ( str( count ), str( i ), str( s ) ) )
756 leaders = onosCli.leaders()
757 try:
758 missing = False
759 if leaders:
760 parsedLeaders = json.loads( leaders )
761 main.log.warn( json.dumps( parsedLeaders,
762 sort_keys=True,
763 indent=4,
764 separators=( ',', ': ' ) ) )
765 # check for all intent partitions
766 topics = []
767 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700768 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700769 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700770 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700771 for topic in topics:
772 if topic not in ONOStopics:
773 main.log.error( "Error: " + topic +
774 " not in leaders" )
775 missing = True
776 else:
777 main.log.error( "leaders() returned None" )
778 except ( ValueError, TypeError ):
779 main.log.exception( "Error parsing leaders" )
780 main.log.error( repr( leaders ) )
781 # Check all nodes
782 if missing:
783 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700784 response = main.CLIs[ i ].leaders( jsonFormat=False )
785 main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700786 str( response ) )
787
788 partitions = onosCli.partitions()
789 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700790 if partitions:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700791 parsedPartitions = json.loads( partitions )
792 main.log.warn( json.dumps( parsedPartitions,
793 sort_keys=True,
794 indent=4,
795 separators=( ',', ': ' ) ) )
796 # TODO check for a leader in all paritions
797 # TODO check for consistency among nodes
798 else:
799 main.log.error( "partitions() returned None" )
800 except ( ValueError, TypeError ):
801 main.log.exception( "Error parsing partitions" )
802 main.log.error( repr( partitions ) )
803 pendingMap = onosCli.pendingMap()
804 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700805 if pendingMap:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700806 parsedPending = json.loads( pendingMap )
807 main.log.warn( json.dumps( parsedPending,
808 sort_keys=True,
809 indent=4,
810 separators=( ',', ': ' ) ) )
811 # TODO check something here?
812 else:
813 main.log.error( "pendingMap() returned None" )
814 except ( ValueError, TypeError ):
815 main.log.exception( "Error parsing pending map" )
816 main.log.error( repr( pendingMap ) )
817
818 intentAddResult = bool( intentAddResult and not missingIntents and
819 installedCheck )
820 if not intentAddResult:
821 main.log.error( "Error in pushing host intents to ONOS" )
822
823 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700824 for j in range( 100 ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700825 correct = True
826 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
827 for i in main.activeNodes:
828 onosIds = []
Jon Hallf37d44d2017-05-24 10:37:30 -0700829 ids = main.CLIs[ i ].getAllIntentsId()
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700830 onosIds.append( ids )
Jon Hallf37d44d2017-05-24 10:37:30 -0700831 main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700832 str( sorted( onosIds ) ) )
833 if sorted( ids ) != sorted( intentIds ):
834 main.log.warn( "Set of intent IDs doesn't match" )
835 correct = False
836 break
837 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700838 intents = json.loads( main.CLIs[ i ].intents() )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700839 for intent in intents:
840 if intent[ 'state' ] != "INSTALLED":
841 main.log.warn( "Intent " + intent[ 'id' ] +
842 " is " + intent[ 'state' ] )
843 correct = False
844 break
845 if correct:
846 break
847 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700848 time.sleep( 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700849 if not intentStop:
850 intentStop = time.time()
851 global gossipTime
852 gossipTime = intentStop - intentStart
853 main.log.info( "It took about " + str( gossipTime ) +
854 " seconds for all intents to appear in each node" )
855 append = False
856 title = "Gossip Intents"
857 count = 1
858 while append is False:
859 curTitle = title + str( count )
860 if curTitle not in labels:
861 labels.append( curTitle )
862 data.append( str( gossipTime ) )
863 append = True
864 else:
865 count += 1
Jon Hallf37d44d2017-05-24 10:37:30 -0700866 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700867 maxGossipTime = gossipPeriod * len( main.activeNodes )
868 utilities.assert_greater_equals(
869 expect=maxGossipTime, actual=gossipTime,
870 onpass="ECM anti-entropy for intents worked within " +
871 "expected time",
872 onfail="Intent ECM anti-entropy took too long. " +
873 "Expected time:{}, Actual time:{}".format( maxGossipTime,
874 gossipTime ) )
875 if gossipTime <= maxGossipTime:
876 intentAddResult = True
877
878 if not intentAddResult or "key" in pendingMap:
879 import time
880 installedCheck = True
881 main.log.info( "Sleeping 60 seconds to see if intents are found" )
882 time.sleep( 60 )
883 onosIds = onosCli.getAllIntentsId()
884 main.log.info( "Submitted intents: " + str( intentIds ) )
885 main.log.info( "Intents in ONOS: " + str( onosIds ) )
886 # Print the intent states
887 intents = onosCli.intents()
888 intentStates = []
889 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
890 count = 0
891 try:
892 for intent in json.loads( intents ):
893 # Iter through intents of a node
894 state = intent.get( 'state', None )
895 if "INSTALLED" not in state:
896 installedCheck = False
897 intentId = intent.get( 'id', None )
898 intentStates.append( ( intentId, state ) )
899 except ( ValueError, TypeError ):
900 main.log.exception( "Error parsing intents" )
901 # add submitted intents not in the store
902 tmplist = [ i for i, s in intentStates ]
903 for i in intentIds:
904 if i not in tmplist:
905 intentStates.append( ( i, " - " ) )
906 intentStates.sort()
907 for i, s in intentStates:
908 count += 1
909 main.log.info( "%-6s%-15s%-15s" %
910 ( str( count ), str( i ), str( s ) ) )
911 leaders = onosCli.leaders()
912 try:
913 missing = False
914 if leaders:
915 parsedLeaders = json.loads( leaders )
916 main.log.warn( json.dumps( parsedLeaders,
917 sort_keys=True,
918 indent=4,
919 separators=( ',', ': ' ) ) )
920 # check for all intent partitions
921 # check for election
922 topics = []
923 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700924 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700925 # FIXME: this should only be after we start the app
926 topics.append( "org.onosproject.election" )
927 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700928 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700929 for topic in topics:
930 if topic not in ONOStopics:
931 main.log.error( "Error: " + topic +
932 " not in leaders" )
933 missing = True
934 else:
935 main.log.error( "leaders() returned None" )
936 except ( ValueError, TypeError ):
937 main.log.exception( "Error parsing leaders" )
938 main.log.error( repr( leaders ) )
939 # Check all nodes
940 if missing:
941 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700942 node = main.CLIs[ i ]
943 response = node.leaders( jsonFormat=False )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700944 main.log.warn( str( node.name ) + " leaders output: \n" +
945 str( response ) )
946
947 partitions = onosCli.partitions()
948 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700949 if partitions:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700950 parsedPartitions = json.loads( partitions )
951 main.log.warn( json.dumps( parsedPartitions,
952 sort_keys=True,
953 indent=4,
954 separators=( ',', ': ' ) ) )
955 # TODO check for a leader in all paritions
956 # TODO check for consistency among nodes
957 else:
958 main.log.error( "partitions() returned None" )
959 except ( ValueError, TypeError ):
960 main.log.exception( "Error parsing partitions" )
961 main.log.error( repr( partitions ) )
962 pendingMap = onosCli.pendingMap()
963 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700964 if pendingMap:
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700965 parsedPending = json.loads( pendingMap )
966 main.log.warn( json.dumps( parsedPending,
967 sort_keys=True,
968 indent=4,
969 separators=( ',', ': ' ) ) )
970 # TODO check something here?
971 else:
972 main.log.error( "pendingMap() returned None" )
973 except ( ValueError, TypeError ):
974 main.log.exception( "Error parsing pending map" )
975 main.log.error( repr( pendingMap ) )
976
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies the host intents installed in CASE3 by:
          1. Polling each intent's state until all report INSTALLED
             (up to ~40 seconds).
          2. Pinging each h<i> -> h<i+10> pair (i in 8..17) through Mininet.
          3. Checking that every expected "work-partition-<i>" topic has a
             leader in the cluster.
          4. Dumping partitions and the pending map for debugging.
          5. If intents were not all INSTALLED, waiting 60 seconds and
             repeating the state dump and the ping sweep.

        Uses the framework-injected `main` fixture (CLIs, Mininet1, log,
        step/case reporting); results are reported via
        utilities.assert_equals rather than return values.
        """
        import json
        import time
        # Sanity-check that the framework populated everything this case needs.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All single-node queries below go through the first active node's CLI.
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll (1s period, max 40 tries) until every intent reports INSTALLED.
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # intents() returned non-JSON (e.g. an error string) or None.
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        # Each host intent connects h<i> to h<i+10>; ping every pair once.
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # On failure, dump the intents for post-mortem debugging.
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        # Every intent work partition should have an elected leader.
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): sibling sections dump per-node leaders only on
        # failure ("if missing:"), but this dump runs when topicCheck
        # PASSED -- possibly an inverted condition; confirm intent before
        # changing.
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[ i ]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never converged to INSTALLED, give the cluster another
        # 60 seconds, re-dump state, and retry the ping sweep once.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Here the per-node leaders dump correctly runs only on failure.
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[ 0 ]
            main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1258
1259 def CASE5( self, main ):
1260 """
1261 Reading state of ONOS
1262 """
1263 import json
1264 import time
1265 assert main.numCtrls, "main.numCtrls not defined"
1266 assert main, "main not defined"
1267 assert utilities.assert_equals, "utilities.assert_equals not defined"
1268 assert main.CLIs, "main.CLIs not defined"
1269 assert main.nodes, "main.nodes not defined"
1270
1271 main.case( "Setting up and gathering data for current state" )
1272 # The general idea for this test case is to pull the state of
1273 # ( intents,flows, topology,... ) from each ONOS node
1274 # We can then compare them with each other and also with past states
1275
1276 main.step( "Check that each switch has a master" )
1277 global mastershipState
1278 mastershipState = '[]'
1279
1280 # Assert that each device has a master
1281 rolesNotNull = main.TRUE
1282 threads = []
1283 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001284 t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001285 name="rolesNotNull-" + str( i ),
1286 args=[] )
1287 threads.append( t )
1288 t.start()
1289
1290 for t in threads:
1291 t.join()
1292 rolesNotNull = rolesNotNull and t.result
1293 utilities.assert_equals(
1294 expect=main.TRUE,
1295 actual=rolesNotNull,
1296 onpass="Each device has a master",
1297 onfail="Some devices don't have a master assigned" )
1298
1299 main.step( "Get the Mastership of each switch from each controller" )
1300 ONOSMastership = []
1301 consistentMastership = True
1302 rolesResults = True
1303 threads = []
1304 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001305 t = main.Thread( target=main.CLIs[ i ].roles,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001306 name="roles-" + str( i ),
1307 args=[] )
1308 threads.append( t )
1309 t.start()
1310
1311 for t in threads:
1312 t.join()
1313 ONOSMastership.append( t.result )
1314
1315 for i in range( len( ONOSMastership ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001316 node = str( main.activeNodes[ i ] + 1 )
1317 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001318 main.log.error( "Error in getting ONOS" + node + " roles" )
1319 main.log.warn( "ONOS" + node + " mastership response: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001320 repr( ONOSMastership[ i ] ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001321 rolesResults = False
1322 utilities.assert_equals(
1323 expect=True,
1324 actual=rolesResults,
1325 onpass="No error in reading roles output",
1326 onfail="Error in reading roles from ONOS" )
1327
1328 main.step( "Check for consistency in roles from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001329 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001330 main.log.info(
1331 "Switch roles are consistent across all ONOS nodes" )
1332 else:
1333 consistentMastership = False
1334 utilities.assert_equals(
1335 expect=True,
1336 actual=consistentMastership,
1337 onpass="Switch roles are consistent across all ONOS nodes",
1338 onfail="ONOS nodes have different views of switch roles" )
1339
1340 if rolesResults and not consistentMastership:
1341 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001342 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001343 try:
1344 main.log.warn(
1345 "ONOS" + node + " roles: ",
1346 json.dumps(
1347 json.loads( ONOSMastership[ i ] ),
1348 sort_keys=True,
1349 indent=4,
1350 separators=( ',', ': ' ) ) )
1351 except ( ValueError, TypeError ):
1352 main.log.warn( repr( ONOSMastership[ i ] ) )
1353 elif rolesResults and consistentMastership:
1354 mastershipState = ONOSMastership[ 0 ]
1355
1356 main.step( "Get the intents from each controller" )
1357 global intentState
1358 intentState = []
1359 ONOSIntents = []
1360 consistentIntents = True # Are Intents consistent across nodes?
1361 intentsResults = True # Could we read Intents from ONOS?
1362 threads = []
1363 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001364 t = main.Thread( target=main.CLIs[ i ].intents,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001365 name="intents-" + str( i ),
1366 args=[],
1367 kwargs={ 'jsonFormat': True } )
1368 threads.append( t )
1369 t.start()
1370
1371 for t in threads:
1372 t.join()
1373 ONOSIntents.append( t.result )
1374
1375 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001376 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001377 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1378 main.log.error( "Error in getting ONOS" + node + " intents" )
1379 main.log.warn( "ONOS" + node + " intents response: " +
1380 repr( ONOSIntents[ i ] ) )
1381 intentsResults = False
1382 utilities.assert_equals(
1383 expect=True,
1384 actual=intentsResults,
1385 onpass="No error in reading intents output",
1386 onfail="Error in reading intents from ONOS" )
1387
1388 main.step( "Check for consistency in Intents from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001389 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001390 main.log.info( "Intents are consistent across all ONOS " +
1391 "nodes" )
1392 else:
1393 consistentIntents = False
1394 main.log.error( "Intents not consistent" )
1395 utilities.assert_equals(
1396 expect=True,
1397 actual=consistentIntents,
1398 onpass="Intents are consistent across all ONOS nodes",
1399 onfail="ONOS nodes have different views of intents" )
1400
1401 if intentsResults:
1402 # Try to make it easy to figure out what is happening
1403 #
1404 # Intent ONOS1 ONOS2 ...
1405 # 0x01 INSTALLED INSTALLING
1406 # ... ... ...
1407 # ... ... ...
1408 title = " Id"
1409 for n in main.activeNodes:
1410 title += " " * 10 + "ONOS" + str( n + 1 )
1411 main.log.warn( title )
1412 # get all intent keys in the cluster
1413 keys = []
1414 try:
1415 # Get the set of all intent keys
1416 for nodeStr in ONOSIntents:
1417 node = json.loads( nodeStr )
1418 for intent in node:
1419 keys.append( intent.get( 'id' ) )
1420 keys = set( keys )
1421 # For each intent key, print the state on each node
1422 for key in keys:
1423 row = "%-13s" % key
1424 for nodeStr in ONOSIntents:
1425 node = json.loads( nodeStr )
1426 for intent in node:
1427 if intent.get( 'id', "Error" ) == key:
1428 row += "%-15s" % intent.get( 'state' )
1429 main.log.warn( row )
1430 # End of intent state table
1431 except ValueError as e:
1432 main.log.exception( e )
1433 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1434
1435 if intentsResults and not consistentIntents:
1436 # print the json objects
Jon Hallf37d44d2017-05-24 10:37:30 -07001437 n = str( main.activeNodes[ -1 ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001438 main.log.debug( "ONOS" + n + " intents: " )
1439 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1440 sort_keys=True,
1441 indent=4,
1442 separators=( ',', ': ' ) ) )
1443 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001444 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001445 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1446 main.log.debug( "ONOS" + node + " intents: " )
Jon Hallf37d44d2017-05-24 10:37:30 -07001447 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001448 sort_keys=True,
1449 indent=4,
1450 separators=( ',', ': ' ) ) )
1451 else:
1452 main.log.debug( "ONOS" + node + " intents match ONOS" +
1453 n + " intents" )
1454 elif intentsResults and consistentIntents:
1455 intentState = ONOSIntents[ 0 ]
1456
1457 main.step( "Get the flows from each controller" )
1458 global flowState
1459 flowState = []
1460 ONOSFlows = []
1461 ONOSFlowsJson = []
1462 flowCheck = main.FALSE
1463 consistentFlows = True
1464 flowsResults = True
1465 threads = []
1466 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001467 t = main.Thread( target=main.CLIs[ i ].flows,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001468 name="flows-" + str( i ),
1469 args=[],
1470 kwargs={ 'jsonFormat': True } )
1471 threads.append( t )
1472 t.start()
1473
1474 # NOTE: Flows command can take some time to run
Jon Hallf37d44d2017-05-24 10:37:30 -07001475 time.sleep( 30 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001476 for t in threads:
1477 t.join()
1478 result = t.result
1479 ONOSFlows.append( result )
1480
1481 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001482 num = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001483 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1484 main.log.error( "Error in getting ONOS" + num + " flows" )
1485 main.log.warn( "ONOS" + num + " flows response: " +
1486 repr( ONOSFlows[ i ] ) )
1487 flowsResults = False
1488 ONOSFlowsJson.append( None )
1489 else:
1490 try:
1491 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1492 except ( ValueError, TypeError ):
1493 # FIXME: change this to log.error?
1494 main.log.exception( "Error in parsing ONOS" + num +
1495 " response as json." )
1496 main.log.error( repr( ONOSFlows[ i ] ) )
1497 ONOSFlowsJson.append( None )
1498 flowsResults = False
1499 utilities.assert_equals(
1500 expect=True,
1501 actual=flowsResults,
1502 onpass="No error in reading flows output",
1503 onfail="Error in reading flows from ONOS" )
1504
1505 main.step( "Check for consistency in Flows from each controller" )
1506 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1507 if all( tmp ):
1508 main.log.info( "Flow count is consistent across all ONOS nodes" )
1509 else:
1510 consistentFlows = False
1511 utilities.assert_equals(
1512 expect=True,
1513 actual=consistentFlows,
1514 onpass="The flow count is consistent across all ONOS nodes",
1515 onfail="ONOS nodes have different flow counts" )
1516
1517 if flowsResults and not consistentFlows:
1518 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001519 node = str( main.activeNodes[ i ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001520 try:
1521 main.log.warn(
1522 "ONOS" + node + " flows: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001523 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001524 indent=4, separators=( ',', ': ' ) ) )
1525 except ( ValueError, TypeError ):
1526 main.log.warn( "ONOS" + node + " flows: " +
1527 repr( ONOSFlows[ i ] ) )
1528 elif flowsResults and consistentFlows:
1529 flowCheck = main.TRUE
1530 flowState = ONOSFlows[ 0 ]
1531
1532 main.step( "Get the OF Table entries" )
1533 global flows
1534 flows = []
1535 for i in range( 1, 29 ):
1536 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1537 if flowCheck == main.FALSE:
1538 for table in flows:
1539 main.log.warn( table )
1540 # TODO: Compare switch flow tables with ONOS flow tables
1541
1542 main.step( "Start continuous pings" )
1543 main.Mininet2.pingLong(
1544 src=main.params[ 'PING' ][ 'source1' ],
1545 target=main.params[ 'PING' ][ 'target1' ],
1546 pingTime=500 )
1547 main.Mininet2.pingLong(
1548 src=main.params[ 'PING' ][ 'source2' ],
1549 target=main.params[ 'PING' ][ 'target2' ],
1550 pingTime=500 )
1551 main.Mininet2.pingLong(
1552 src=main.params[ 'PING' ][ 'source3' ],
1553 target=main.params[ 'PING' ][ 'target3' ],
1554 pingTime=500 )
1555 main.Mininet2.pingLong(
1556 src=main.params[ 'PING' ][ 'source4' ],
1557 target=main.params[ 'PING' ][ 'target4' ],
1558 pingTime=500 )
1559 main.Mininet2.pingLong(
1560 src=main.params[ 'PING' ][ 'source5' ],
1561 target=main.params[ 'PING' ][ 'target5' ],
1562 pingTime=500 )
1563 main.Mininet2.pingLong(
1564 src=main.params[ 'PING' ][ 'source6' ],
1565 target=main.params[ 'PING' ][ 'target6' ],
1566 pingTime=500 )
1567 main.Mininet2.pingLong(
1568 src=main.params[ 'PING' ][ 'source7' ],
1569 target=main.params[ 'PING' ][ 'target7' ],
1570 pingTime=500 )
1571 main.Mininet2.pingLong(
1572 src=main.params[ 'PING' ][ 'source8' ],
1573 target=main.params[ 'PING' ][ 'target8' ],
1574 pingTime=500 )
1575 main.Mininet2.pingLong(
1576 src=main.params[ 'PING' ][ 'source9' ],
1577 target=main.params[ 'PING' ][ 'target9' ],
1578 pingTime=500 )
1579 main.Mininet2.pingLong(
1580 src=main.params[ 'PING' ][ 'source10' ],
1581 target=main.params[ 'PING' ][ 'target10' ],
1582 pingTime=500 )
1583
1584 main.step( "Collecting topology information from ONOS" )
1585 devices = []
1586 threads = []
1587 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001588 t = main.Thread( target=main.CLIs[ i ].devices,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001589 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001590 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001591 threads.append( t )
1592 t.start()
1593
1594 for t in threads:
1595 t.join()
1596 devices.append( t.result )
1597 hosts = []
1598 threads = []
1599 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001600 t = main.Thread( target=main.CLIs[ i ].hosts,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001601 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001602 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001603 threads.append( t )
1604 t.start()
1605
1606 for t in threads:
1607 t.join()
1608 try:
1609 hosts.append( json.loads( t.result ) )
1610 except ( ValueError, TypeError ):
1611 # FIXME: better handling of this, print which node
1612 # Maybe use thread name?
1613 main.log.exception( "Error parsing json output of hosts" )
1614 main.log.warn( repr( t.result ) )
1615 hosts.append( None )
1616
1617 ports = []
1618 threads = []
1619 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001620 t = main.Thread( target=main.CLIs[ i ].ports,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001621 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001622 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001623 threads.append( t )
1624 t.start()
1625
1626 for t in threads:
1627 t.join()
1628 ports.append( t.result )
1629 links = []
1630 threads = []
1631 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001632 t = main.Thread( target=main.CLIs[ i ].links,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001633 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001634 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001635 threads.append( t )
1636 t.start()
1637
1638 for t in threads:
1639 t.join()
1640 links.append( t.result )
1641 clusters = []
1642 threads = []
1643 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001644 t = main.Thread( target=main.CLIs[ i ].clusters,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001645 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001646 args=[] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001647 threads.append( t )
1648 t.start()
1649
1650 for t in threads:
1651 t.join()
1652 clusters.append( t.result )
1653 # Compare json objects for hosts and dataplane clusters
1654
1655 # hosts
1656 main.step( "Host view is consistent across ONOS nodes" )
1657 consistentHostsResult = main.TRUE
1658 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001659 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001660 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1661 if hosts[ controller ] == hosts[ 0 ]:
1662 continue
1663 else: # hosts not consistent
1664 main.log.error( "hosts from ONOS" +
1665 controllerStr +
1666 " is inconsistent with ONOS1" )
1667 main.log.warn( repr( hosts[ controller ] ) )
1668 consistentHostsResult = main.FALSE
1669
1670 else:
1671 main.log.error( "Error in getting ONOS hosts from ONOS" +
1672 controllerStr )
1673 consistentHostsResult = main.FALSE
1674 main.log.warn( "ONOS" + controllerStr +
1675 " hosts response: " +
1676 repr( hosts[ controller ] ) )
1677 utilities.assert_equals(
1678 expect=main.TRUE,
1679 actual=consistentHostsResult,
1680 onpass="Hosts view is consistent across all ONOS nodes",
1681 onfail="ONOS nodes have different views of hosts" )
1682
1683 main.step( "Each host has an IP address" )
1684 ipResult = main.TRUE
1685 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001686 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001687 if hosts[ controller ]:
1688 for host in hosts[ controller ]:
Jon Hallf37d44d2017-05-24 10:37:30 -07001689 if not host.get( 'ipAddresses', [] ):
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001690 main.log.error( "Error with host ips on controller" +
1691 controllerStr + ": " + str( host ) )
1692 ipResult = main.FALSE
1693 utilities.assert_equals(
1694 expect=main.TRUE,
1695 actual=ipResult,
1696 onpass="The ips of the hosts aren't empty",
1697 onfail="The ip of at least one host is missing" )
1698
1699 # Strongly connected clusters of devices
1700 main.step( "Cluster view is consistent across ONOS nodes" )
1701 consistentClustersResult = main.TRUE
1702 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001703 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001704 if "Error" not in clusters[ controller ]:
1705 if clusters[ controller ] == clusters[ 0 ]:
1706 continue
1707 else: # clusters not consistent
1708 main.log.error( "clusters from ONOS" + controllerStr +
1709 " is inconsistent with ONOS1" )
1710 consistentClustersResult = main.FALSE
1711
1712 else:
1713 main.log.error( "Error in getting dataplane clusters " +
1714 "from ONOS" + controllerStr )
1715 consistentClustersResult = main.FALSE
1716 main.log.warn( "ONOS" + controllerStr +
1717 " clusters response: " +
1718 repr( clusters[ controller ] ) )
1719 utilities.assert_equals(
1720 expect=main.TRUE,
1721 actual=consistentClustersResult,
1722 onpass="Clusters view is consistent across all ONOS nodes",
1723 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001724 if not consistentClustersResult:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001725 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001726
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001727 # there should always only be one cluster
1728 main.step( "Cluster view correct across ONOS nodes" )
1729 try:
1730 numClusters = len( json.loads( clusters[ 0 ] ) )
1731 except ( ValueError, TypeError ):
1732 main.log.exception( "Error parsing clusters[0]: " +
1733 repr( clusters[ 0 ] ) )
1734 numClusters = "ERROR"
1735 utilities.assert_equals(
1736 expect=1,
1737 actual=numClusters,
1738 onpass="ONOS shows 1 SCC",
1739 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1740
1741 main.step( "Comparing ONOS topology to MN" )
1742 devicesResults = main.TRUE
1743 linksResults = main.TRUE
1744 hostsResults = main.TRUE
1745 mnSwitches = main.Mininet1.getSwitches()
1746 mnLinks = main.Mininet1.getLinks()
1747 mnHosts = main.Mininet1.getHosts()
1748 for controller in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001749 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001750 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07001751 "Error" not in devices[ controller ] and\
1752 "Error" not in ports[ controller ]:
1753 currentDevicesResult = main.Mininet1.compareSwitches(
1754 mnSwitches,
1755 json.loads( devices[ controller ] ),
1756 json.loads( ports[ controller ] ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001757 else:
1758 currentDevicesResult = main.FALSE
1759 utilities.assert_equals( expect=main.TRUE,
1760 actual=currentDevicesResult,
1761 onpass="ONOS" + controllerStr +
1762 " Switches view is correct",
1763 onfail="ONOS" + controllerStr +
1764 " Switches view is incorrect" )
1765 if links[ controller ] and "Error" not in links[ controller ]:
1766 currentLinksResult = main.Mininet1.compareLinks(
1767 mnSwitches, mnLinks,
1768 json.loads( links[ controller ] ) )
1769 else:
1770 currentLinksResult = main.FALSE
1771 utilities.assert_equals( expect=main.TRUE,
1772 actual=currentLinksResult,
1773 onpass="ONOS" + controllerStr +
1774 " links view is correct",
1775 onfail="ONOS" + controllerStr +
1776 " links view is incorrect" )
1777
1778 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1779 currentHostsResult = main.Mininet1.compareHosts(
1780 mnHosts,
1781 hosts[ controller ] )
1782 else:
1783 currentHostsResult = main.FALSE
1784 utilities.assert_equals( expect=main.TRUE,
1785 actual=currentHostsResult,
1786 onpass="ONOS" + controllerStr +
1787 " hosts exist in Mininet",
1788 onfail="ONOS" + controllerStr +
1789 " hosts don't match Mininet" )
1790
1791 devicesResults = devicesResults and currentDevicesResult
1792 linksResults = linksResults and currentLinksResult
1793 hostsResults = hostsResults and currentHostsResult
1794
1795 main.step( "Device information is correct" )
1796 utilities.assert_equals(
1797 expect=main.TRUE,
1798 actual=devicesResults,
1799 onpass="Device information is correct",
1800 onfail="Device information is incorrect" )
1801
1802 main.step( "Links are correct" )
1803 utilities.assert_equals(
1804 expect=main.TRUE,
1805 actual=linksResults,
1806 onpass="Link are correct",
1807 onfail="Links are incorrect" )
1808
1809 main.step( "Hosts are correct" )
1810 utilities.assert_equals(
1811 expect=main.TRUE,
1812 actual=hostsResults,
1813 onpass="Hosts are correct",
1814 onfail="Hosts are incorrect" )
1815
1816 def CASE6( self, main ):
1817 """
1818 The Scaling case.
1819 """
1820 import time
1821 import re
1822 assert main.numCtrls, "main.numCtrls not defined"
1823 assert main, "main not defined"
1824 assert utilities.assert_equals, "utilities.assert_equals not defined"
1825 assert main.CLIs, "main.CLIs not defined"
1826 assert main.nodes, "main.nodes not defined"
1827 try:
1828 labels
1829 except NameError:
1830 main.log.error( "labels not defined, setting to []" )
1831 global labels
1832 labels = []
1833 try:
1834 data
1835 except NameError:
1836 main.log.error( "data not defined, setting to []" )
1837 global data
1838 data = []
1839
Jon Hall69b2b982016-05-11 12:04:59 -07001840 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001841
1842 main.step( "Checking ONOS Logs for errors" )
1843 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001844 node = main.nodes[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001845 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1846 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1847
1848 """
1849 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1850 modify cluster.json file appropriately
1851 install/deactivate node as needed
1852 """
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001853 try:
1854 prevNodes = main.activeNodes
Jon Hallf37d44d2017-05-24 10:37:30 -07001855 scale = main.scaling.pop( 0 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001856 if "e" in scale:
1857 equal = True
1858 else:
1859 equal = False
Jon Hallf37d44d2017-05-24 10:37:30 -07001860 main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001861 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1862 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1863 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1864 onpass="New cluster metadata file generated",
1865 onfail="Failled to generate new metadata file" )
1866 time.sleep( 5 ) # Give time for nodes to read new file
1867 except IndexError:
1868 main.cleanup()
1869 main.exit()
1870
1871 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1872 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1873
1874 main.step( "Start new nodes" ) # OR stop old nodes?
1875 started = main.TRUE
1876 for i in newNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001877 started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001878 utilities.assert_equals( expect=main.TRUE, actual=started,
1879 onpass="ONOS started",
1880 onfail="ONOS start NOT successful" )
1881
1882 main.step( "Checking if ONOS is up yet" )
1883 for i in range( 2 ):
1884 onosIsupResult = main.TRUE
1885 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001886 node = main.nodes[ i ]
Jon Hall168c1862017-01-31 17:35:34 -08001887 main.ONOSbench.onosSecureSSH( node=node.ip_address )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001888 started = main.ONOSbench.isup( node.ip_address )
1889 if not started:
1890 main.log.error( node.name + " didn't start!" )
1891 onosIsupResult = onosIsupResult and started
1892 if onosIsupResult == main.TRUE:
1893 break
1894 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1895 onpass="ONOS started",
1896 onfail="ONOS start NOT successful" )
1897
Jon Hall6509dbf2016-06-21 17:01:17 -07001898 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001899 cliResults = main.TRUE
1900 threads = []
1901 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001902 t = main.Thread( target=main.CLIs[ i ].startOnosCli,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001903 name="startOnosCli-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001904 args=[ main.nodes[ i ].ip_address ] )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001905 threads.append( t )
1906 t.start()
1907
1908 for t in threads:
1909 t.join()
1910 cliResults = cliResults and t.result
1911 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1912 onpass="ONOS cli started",
1913 onfail="ONOS clis did not start" )
1914
1915 main.step( "Checking ONOS nodes" )
1916 nodeResults = utilities.retry( main.HA.nodesCheck,
1917 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07001918 args=[ main.activeNodes ],
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001919 attempts=5 )
1920 utilities.assert_equals( expect=True, actual=nodeResults,
1921 onpass="Nodes check successful",
1922 onfail="Nodes check NOT successful" )
1923
1924 for i in range( 10 ):
1925 ready = True
1926 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001927 cli = main.CLIs[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001928 output = cli.summary()
1929 if not output:
1930 ready = False
1931 if ready:
1932 break
1933 time.sleep( 30 )
1934 utilities.assert_equals( expect=True, actual=ready,
1935 onpass="ONOS summary command succeded",
1936 onfail="ONOS summary command failed" )
1937 if not ready:
1938 main.cleanup()
1939 main.exit()
1940
1941 # Rerun for election on new nodes
1942 runResults = main.TRUE
1943 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001944 cli = main.CLIs[ i ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001945 run = cli.electionTestRun()
1946 if run != main.TRUE:
1947 main.log.error( "Error running for election on " + cli.name )
1948 runResults = runResults and run
1949 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1950 onpass="Reran for election",
1951 onfail="Failed to rerun for election" )
1952
1953 # TODO: Make this configurable
1954 time.sleep( 60 )
1955 for node in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001956 main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
1957 main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
1959 main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
1960 main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001961
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that, after the cluster was resized in CASE6:
          - every switch still has a master,
          - device roles and intents are readable and consistent across nodes,
          - intents match the pre-scaling snapshot,
          - OpenFlow tables match the pre-scaling snapshot,
          - the leadership election app still has a single, agreed-on leader.

        NOTE(review): relies on module globals saved by earlier cases —
        'intentState' and 'flows' (presumably captured in CASE5); verify
        those cases ran before this one.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master; query every active node in
        # parallel and AND the per-node results together.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect each node's view of device mastership (one thread per node);
        # results join in creation order so ONOSMastership[ i ] lines up with
        # main.activeNodes[ i ].
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's roles output must match the first node's
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's (JSON) roles view to help debug the mismatch
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        # Same fan-out pattern as roles: one intents query per active node
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted raw outputs so ordering differences don't matter
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states, e.g. {'INSTALLED': 25}
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump each node's full intents view to help debug the mismatch
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not byte-equal: compare intent objects
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Log before/after side by side for debugging
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # Compare each switch's current flow table against the 'flows'
        # snapshot; switches are fixed as s1..s28 (topology-specific).
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        # The block below is old ping-loss checking code kept as a string
        # "comment"; it is never executed.
        """
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        """
        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection: every node must report the same,
        # non-None leader for the election app.
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[ i ]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            # More than one distinct leader reported across the cluster
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2269
2270 def CASE8( self, main ):
2271 """
2272 Compare topo
2273 """
2274 import json
2275 import time
2276 assert main.numCtrls, "main.numCtrls not defined"
2277 assert main, "main not defined"
2278 assert utilities.assert_equals, "utilities.assert_equals not defined"
2279 assert main.CLIs, "main.CLIs not defined"
2280 assert main.nodes, "main.nodes not defined"
2281
2282 main.case( "Compare ONOS Topology view to Mininet topology" )
2283 main.caseExplanation = "Compare topology objects between Mininet" +\
2284 " and ONOS"
2285 topoResult = main.FALSE
2286 topoFailMsg = "ONOS topology don't match Mininet"
2287 elapsed = 0
2288 count = 0
2289 main.step( "Comparing ONOS topology to MN topology" )
2290 startTime = time.time()
2291 # Give time for Gossip to work
2292 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2293 devicesResults = main.TRUE
2294 linksResults = main.TRUE
2295 hostsResults = main.TRUE
2296 hostAttachmentResults = True
2297 count += 1
2298 cliStart = time.time()
2299 devices = []
2300 threads = []
2301 for i in main.activeNodes:
2302 t = main.Thread( target=utilities.retry,
2303 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002304 args=[ main.CLIs[ i ].devices, [ None ] ],
2305 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002306 'randomTime': True } )
2307 threads.append( t )
2308 t.start()
2309
2310 for t in threads:
2311 t.join()
2312 devices.append( t.result )
2313 hosts = []
2314 ipResult = main.TRUE
2315 threads = []
2316 for i in main.activeNodes:
2317 t = main.Thread( target=utilities.retry,
2318 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002319 args=[ main.CLIs[ i ].hosts, [ None ] ],
2320 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002321 'randomTime': True } )
2322 threads.append( t )
2323 t.start()
2324
2325 for t in threads:
2326 t.join()
2327 try:
2328 hosts.append( json.loads( t.result ) )
2329 except ( ValueError, TypeError ):
2330 main.log.exception( "Error parsing hosts results" )
2331 main.log.error( repr( t.result ) )
2332 hosts.append( None )
2333 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002334 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002335 if hosts[ controller ]:
2336 for host in hosts[ controller ]:
2337 if host is None or host.get( 'ipAddresses', [] ) == []:
2338 main.log.error(
2339 "Error with host ipAddresses on controller" +
2340 controllerStr + ": " + str( host ) )
2341 ipResult = main.FALSE
2342 ports = []
2343 threads = []
2344 for i in main.activeNodes:
2345 t = main.Thread( target=utilities.retry,
2346 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002347 args=[ main.CLIs[ i ].ports, [ None ] ],
2348 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002349 'randomTime': True } )
2350 threads.append( t )
2351 t.start()
2352
2353 for t in threads:
2354 t.join()
2355 ports.append( t.result )
2356 links = []
2357 threads = []
2358 for i in main.activeNodes:
2359 t = main.Thread( target=utilities.retry,
2360 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002361 args=[ main.CLIs[ i ].links, [ None ] ],
2362 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002363 'randomTime': True } )
2364 threads.append( t )
2365 t.start()
2366
2367 for t in threads:
2368 t.join()
2369 links.append( t.result )
2370 clusters = []
2371 threads = []
2372 for i in main.activeNodes:
2373 t = main.Thread( target=utilities.retry,
2374 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002375 args=[ main.CLIs[ i ].clusters, [ None ] ],
2376 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002377 'randomTime': True } )
2378 threads.append( t )
2379 t.start()
2380
2381 for t in threads:
2382 t.join()
2383 clusters.append( t.result )
2384
2385 elapsed = time.time() - startTime
2386 cliTime = time.time() - cliStart
2387 print "Elapsed time: " + str( elapsed )
2388 print "CLI time: " + str( cliTime )
2389
2390 if all( e is None for e in devices ) and\
2391 all( e is None for e in hosts ) and\
2392 all( e is None for e in ports ) and\
2393 all( e is None for e in links ) and\
2394 all( e is None for e in clusters ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002395 topoFailMsg = "Could not get topology from ONOS"
2396 main.log.error( topoFailMsg )
2397 continue # Try again, No use trying to compare
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002398
2399 mnSwitches = main.Mininet1.getSwitches()
2400 mnLinks = main.Mininet1.getLinks()
2401 mnHosts = main.Mininet1.getHosts()
2402 for controller in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002403 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002404 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07002405 "Error" not in devices[ controller ] and\
2406 "Error" not in ports[ controller ]:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002407
2408 try:
2409 currentDevicesResult = main.Mininet1.compareSwitches(
2410 mnSwitches,
2411 json.loads( devices[ controller ] ),
2412 json.loads( ports[ controller ] ) )
2413 except ( TypeError, ValueError ):
2414 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2415 devices[ controller ], ports[ controller ] ) )
2416 else:
2417 currentDevicesResult = main.FALSE
2418 utilities.assert_equals( expect=main.TRUE,
2419 actual=currentDevicesResult,
2420 onpass="ONOS" + controllerStr +
2421 " Switches view is correct",
2422 onfail="ONOS" + controllerStr +
2423 " Switches view is incorrect" )
2424
2425 if links[ controller ] and "Error" not in links[ controller ]:
2426 currentLinksResult = main.Mininet1.compareLinks(
2427 mnSwitches, mnLinks,
2428 json.loads( links[ controller ] ) )
2429 else:
2430 currentLinksResult = main.FALSE
2431 utilities.assert_equals( expect=main.TRUE,
2432 actual=currentLinksResult,
2433 onpass="ONOS" + controllerStr +
2434 " links view is correct",
2435 onfail="ONOS" + controllerStr +
2436 " links view is incorrect" )
2437 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2438 currentHostsResult = main.Mininet1.compareHosts(
2439 mnHosts,
2440 hosts[ controller ] )
2441 elif hosts[ controller ] == []:
2442 currentHostsResult = main.TRUE
2443 else:
2444 currentHostsResult = main.FALSE
2445 utilities.assert_equals( expect=main.TRUE,
2446 actual=currentHostsResult,
2447 onpass="ONOS" + controllerStr +
2448 " hosts exist in Mininet",
2449 onfail="ONOS" + controllerStr +
2450 " hosts don't match Mininet" )
2451 # CHECKING HOST ATTACHMENT POINTS
2452 hostAttachment = True
2453 zeroHosts = False
2454 # FIXME: topo-HA/obelisk specific mappings:
2455 # key is mac and value is dpid
2456 mappings = {}
2457 for i in range( 1, 29 ): # hosts 1 through 28
2458 # set up correct variables:
Jon Hallf37d44d2017-05-24 10:37:30 -07002459 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002460 if i == 1:
Jon Hallf37d44d2017-05-24 10:37:30 -07002461 deviceId = "1000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002462 elif i == 2:
Jon Hallf37d44d2017-05-24 10:37:30 -07002463 deviceId = "2000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002464 elif i == 3:
Jon Hallf37d44d2017-05-24 10:37:30 -07002465 deviceId = "3000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002466 elif i == 4:
Jon Hallf37d44d2017-05-24 10:37:30 -07002467 deviceId = "3004".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002468 elif i == 5:
Jon Hallf37d44d2017-05-24 10:37:30 -07002469 deviceId = "5000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002470 elif i == 6:
Jon Hallf37d44d2017-05-24 10:37:30 -07002471 deviceId = "6000".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002472 elif i == 7:
Jon Hallf37d44d2017-05-24 10:37:30 -07002473 deviceId = "6007".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002474 elif i >= 8 and i <= 17:
2475 dpid = '3' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002476 deviceId = dpid.zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002477 elif i >= 18 and i <= 27:
2478 dpid = '6' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002479 deviceId = dpid.zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002480 elif i == 28:
Jon Hallf37d44d2017-05-24 10:37:30 -07002481 deviceId = "2800".zfill( 16 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002482 mappings[ macId ] = deviceId
2483 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2484 if hosts[ controller ] == []:
2485 main.log.warn( "There are no hosts discovered" )
2486 zeroHosts = True
2487 else:
2488 for host in hosts[ controller ]:
2489 mac = None
2490 location = None
2491 device = None
2492 port = None
2493 try:
2494 mac = host.get( 'mac' )
2495 assert mac, "mac field could not be found for this host object"
2496
Jeremy Ronquillo0e538bc2017-06-13 15:16:09 -07002497 location = host.get( 'locations' )[ 0 ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002498 assert location, "location field could not be found for this host object"
2499
2500 # Trim the protocol identifier off deviceId
Jon Hallf37d44d2017-05-24 10:37:30 -07002501 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002502 assert device, "elementId field could not be found for this host location object"
2503
2504 port = location.get( 'port' )
2505 assert port, "port field could not be found for this host location object"
2506
2507 # Now check if this matches where they should be
2508 if mac and device and port:
2509 if str( port ) != "1":
2510 main.log.error( "The attachment port is incorrect for " +
2511 "host " + str( mac ) +
Jon Hallf37d44d2017-05-24 10:37:30 -07002512 ". Expected: 1 Actual: " + str( port ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002513 hostAttachment = False
2514 if device != mappings[ str( mac ) ]:
2515 main.log.error( "The attachment device is incorrect for " +
2516 "host " + str( mac ) +
2517 ". Expected: " + mappings[ str( mac ) ] +
2518 " Actual: " + device )
2519 hostAttachment = False
2520 else:
2521 hostAttachment = False
2522 except AssertionError:
2523 main.log.exception( "Json object not as expected" )
2524 main.log.error( repr( host ) )
2525 hostAttachment = False
2526 else:
2527 main.log.error( "No hosts json output or \"Error\"" +
2528 " in output. hosts = " +
2529 repr( hosts[ controller ] ) )
2530 if zeroHosts is False:
2531 # TODO: Find a way to know if there should be hosts in a
2532 # given point of the test
2533 hostAttachment = True
2534
2535 # END CHECKING HOST ATTACHMENT POINTS
2536 devicesResults = devicesResults and currentDevicesResult
2537 linksResults = linksResults and currentLinksResult
2538 hostsResults = hostsResults and currentHostsResult
2539 hostAttachmentResults = hostAttachmentResults and\
2540 hostAttachment
2541 topoResult = ( devicesResults and linksResults
2542 and hostsResults and ipResult and
2543 hostAttachmentResults )
2544 utilities.assert_equals( expect=True,
2545 actual=topoResult,
2546 onpass="ONOS topology matches Mininet",
2547 onfail=topoFailMsg )
2548 # End of While loop to pull ONOS state
2549
2550 # Compare json objects for hosts and dataplane clusters
2551
2552 # hosts
2553 main.step( "Hosts view is consistent across all ONOS nodes" )
2554 consistentHostsResult = main.TRUE
2555 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002556 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002557 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2558 if hosts[ controller ] == hosts[ 0 ]:
2559 continue
2560 else: # hosts not consistent
2561 main.log.error( "hosts from ONOS" + controllerStr +
2562 " is inconsistent with ONOS1" )
2563 main.log.warn( repr( hosts[ controller ] ) )
2564 consistentHostsResult = main.FALSE
2565
2566 else:
2567 main.log.error( "Error in getting ONOS hosts from ONOS" +
2568 controllerStr )
2569 consistentHostsResult = main.FALSE
2570 main.log.warn( "ONOS" + controllerStr +
2571 " hosts response: " +
2572 repr( hosts[ controller ] ) )
2573 utilities.assert_equals(
2574 expect=main.TRUE,
2575 actual=consistentHostsResult,
2576 onpass="Hosts view is consistent across all ONOS nodes",
2577 onfail="ONOS nodes have different views of hosts" )
2578
2579 main.step( "Hosts information is correct" )
2580 hostsResults = hostsResults and ipResult
2581 utilities.assert_equals(
2582 expect=main.TRUE,
2583 actual=hostsResults,
2584 onpass="Host information is correct",
2585 onfail="Host information is incorrect" )
2586
2587 main.step( "Host attachment points to the network" )
2588 utilities.assert_equals(
2589 expect=True,
2590 actual=hostAttachmentResults,
2591 onpass="Hosts are correctly attached to the network",
2592 onfail="ONOS did not correctly attach hosts to the network" )
2593
2594 # Strongly connected clusters of devices
2595 main.step( "Clusters view is consistent across all ONOS nodes" )
2596 consistentClustersResult = main.TRUE
2597 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002598 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002599 if "Error" not in clusters[ controller ]:
2600 if clusters[ controller ] == clusters[ 0 ]:
2601 continue
2602 else: # clusters not consistent
2603 main.log.error( "clusters from ONOS" +
2604 controllerStr +
2605 " is inconsistent with ONOS1" )
2606 consistentClustersResult = main.FALSE
2607 else:
2608 main.log.error( "Error in getting dataplane clusters " +
2609 "from ONOS" + controllerStr )
2610 consistentClustersResult = main.FALSE
2611 main.log.warn( "ONOS" + controllerStr +
2612 " clusters response: " +
2613 repr( clusters[ controller ] ) )
2614 utilities.assert_equals(
2615 expect=main.TRUE,
2616 actual=consistentClustersResult,
2617 onpass="Clusters view is consistent across all ONOS nodes",
2618 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002619 if not consistentClustersResult:
2620 main.log.debug( clusters )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002621
2622 main.step( "There is only one SCC" )
2623 # there should always only be one cluster
2624 try:
2625 numClusters = len( json.loads( clusters[ 0 ] ) )
2626 except ( ValueError, TypeError ):
2627 main.log.exception( "Error parsing clusters[0]: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07002628 repr( clusters[ 0 ] ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002629 numClusters = "ERROR"
2630 clusterResults = main.FALSE
2631 if numClusters == 1:
2632 clusterResults = main.TRUE
2633 utilities.assert_equals(
2634 expect=1,
2635 actual=numClusters,
2636 onpass="ONOS shows 1 SCC",
2637 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2638
2639 topoResult = ( devicesResults and linksResults
2640 and hostsResults and consistentHostsResult
2641 and consistentClustersResult and clusterResults
2642 and ipResult and hostAttachmentResults )
2643
2644 topoResult = topoResult and int( count <= 2 )
2645 note = "note it takes about " + str( int( cliTime ) ) + \
2646 " seconds for the test to make all the cli calls to fetch " +\
2647 "the topology from each ONOS instance"
2648 main.log.info(
2649 "Very crass estimate for topology discovery/convergence( " +
2650 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2651 str( count ) + " tries" )
2652
2653 main.step( "Device information is correct" )
2654 utilities.assert_equals(
2655 expect=main.TRUE,
2656 actual=devicesResults,
2657 onpass="Device information is correct",
2658 onfail="Device information is incorrect" )
2659
2660 main.step( "Links are correct" )
2661 utilities.assert_equals(
2662 expect=main.TRUE,
2663 actual=linksResults,
2664 onpass="Link are correct",
2665 onfail="Links are incorrect" )
2666
2667 main.step( "Hosts are correct" )
2668 utilities.assert_equals(
2669 expect=main.TRUE,
2670 actual=hostsResults,
2671 onpass="Hosts are correct",
2672 onfail="Hosts are incorrect" )
2673
2674 # FIXME: move this to an ONOS state case
2675 main.step( "Checking ONOS nodes" )
2676 nodeResults = utilities.retry( main.HA.nodesCheck,
2677 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07002678 args=[ main.activeNodes ],
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002679 attempts=5 )
2680 utilities.assert_equals( expect=True, actual=nodeResults,
2681 onpass="Nodes check successful",
2682 onfail="Nodes check NOT successful" )
2683 if not nodeResults:
2684 for i in main.activeNodes:
2685 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallf37d44d2017-05-24 10:37:30 -07002686 main.CLIs[ i ].name,
2687 main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002688
Jon Halld2871c22016-07-26 11:01:14 -07002689 if not topoResult:
2690 main.cleanup()
2691 main.exit()
2692
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002693 def CASE9( self, main ):
2694 """
2695 Link s3-s28 down
2696 """
2697 import time
2698 assert main.numCtrls, "main.numCtrls not defined"
2699 assert main, "main not defined"
2700 assert utilities.assert_equals, "utilities.assert_equals not defined"
2701 assert main.CLIs, "main.CLIs not defined"
2702 assert main.nodes, "main.nodes not defined"
2703 # NOTE: You should probably run a topology check after this
2704
2705 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2706
2707 description = "Turn off a link to ensure that Link Discovery " +\
2708 "is working properly"
2709 main.case( description )
2710
2711 main.step( "Kill Link between s3 and s28" )
2712 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2713 main.log.info( "Waiting " + str( linkSleep ) +
2714 " seconds for link down to be discovered" )
2715 time.sleep( linkSleep )
2716 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2717 onpass="Link down successful",
2718 onfail="Failed to bring link down" )
2719 # TODO do some sort of check here
2720
2721 def CASE10( self, main ):
2722 """
2723 Link s3-s28 up
2724 """
2725 import time
2726 assert main.numCtrls, "main.numCtrls not defined"
2727 assert main, "main not defined"
2728 assert utilities.assert_equals, "utilities.assert_equals not defined"
2729 assert main.CLIs, "main.CLIs not defined"
2730 assert main.nodes, "main.nodes not defined"
2731 # NOTE: You should probably run a topology check after this
2732
2733 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2734
2735 description = "Restore a link to ensure that Link Discovery is " + \
2736 "working properly"
2737 main.case( description )
2738
2739 main.step( "Bring link between s3 and s28 back up" )
2740 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2741 main.log.info( "Waiting " + str( linkSleep ) +
2742 " seconds for link up to be discovered" )
2743 time.sleep( linkSleep )
2744 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2745 onpass="Link up successful",
2746 onfail="Failed to bring link up" )
2747 # TODO do some sort of check here
2748
2749 def CASE11( self, main ):
2750 """
2751 Switch Down
2752 """
2753 # NOTE: You should probably run a topology check after this
2754 import time
2755 assert main.numCtrls, "main.numCtrls not defined"
2756 assert main, "main not defined"
2757 assert utilities.assert_equals, "utilities.assert_equals not defined"
2758 assert main.CLIs, "main.CLIs not defined"
2759 assert main.nodes, "main.nodes not defined"
2760
2761 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2762
2763 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallf37d44d2017-05-24 10:37:30 -07002764 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002765 main.case( description )
2766 switch = main.params[ 'kill' ][ 'switch' ]
2767 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2768
2769 # TODO: Make this switch parameterizable
2770 main.step( "Kill " + switch )
2771 main.log.info( "Deleting " + switch )
2772 main.Mininet1.delSwitch( switch )
2773 main.log.info( "Waiting " + str( switchSleep ) +
2774 " seconds for switch down to be discovered" )
2775 time.sleep( switchSleep )
2776 device = onosCli.getDevice( dpid=switchDPID )
2777 # Peek at the deleted switch
2778 main.log.warn( str( device ) )
2779 result = main.FALSE
2780 if device and device[ 'available' ] is False:
2781 result = main.TRUE
2782 utilities.assert_equals( expect=main.TRUE, actual=result,
2783 onpass="Kill switch successful",
2784 onfail="Failed to kill switch?" )
2785
2786 def CASE12( self, main ):
2787 """
2788 Switch Up
2789 """
2790 # NOTE: You should probably run a topology check after this
2791 import time
2792 assert main.numCtrls, "main.numCtrls not defined"
2793 assert main, "main not defined"
2794 assert utilities.assert_equals, "utilities.assert_equals not defined"
2795 assert main.CLIs, "main.CLIs not defined"
2796 assert main.nodes, "main.nodes not defined"
2797
2798 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2799 switch = main.params[ 'kill' ][ 'switch' ]
2800 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2801 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallf37d44d2017-05-24 10:37:30 -07002802 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002803 description = "Adding a switch to ensure it is discovered correctly"
2804 main.case( description )
2805
2806 main.step( "Add back " + switch )
2807 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2808 for peer in links:
2809 main.Mininet1.addLink( switch, peer )
2810 ipList = [ node.ip_address for node in main.nodes ]
2811 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2812 main.log.info( "Waiting " + str( switchSleep ) +
2813 " seconds for switch up to be discovered" )
2814 time.sleep( switchSleep )
2815 device = onosCli.getDevice( dpid=switchDPID )
2816 # Peek at the deleted switch
2817 main.log.warn( str( device ) )
2818 result = main.FALSE
2819 if device and device[ 'available' ]:
2820 result = main.TRUE
2821 utilities.assert_equals( expect=main.TRUE, actual=result,
2822 onpass="add switch successful",
2823 onfail="Failed to add switch?" )
2824
2825 def CASE13( self, main ):
2826 """
2827 Clean up
2828 """
2829 assert main.numCtrls, "main.numCtrls not defined"
2830 assert main, "main not defined"
2831 assert utilities.assert_equals, "utilities.assert_equals not defined"
2832 assert main.CLIs, "main.CLIs not defined"
2833 assert main.nodes, "main.nodes not defined"
2834
2835 main.case( "Test Cleanup" )
2836 main.step( "Killing tcpdumps" )
2837 main.Mininet2.stopTcpdump()
2838
2839 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2840 main.step( "Copying MN pcap and ONOS log files to test station" )
2841 # NOTE: MN Pcap file is being saved to logdir.
2842 # We scp this file as MN and TestON aren't necessarily the same vm
2843
2844 # FIXME: To be replaced with a Jenkin's post script
2845 # TODO: Load these from params
2846 # NOTE: must end in /
2847 logFolder = "/opt/onos/log/"
2848 logFiles = [ "karaf.log", "karaf.log.1" ]
2849 # NOTE: must end in /
2850 for f in logFiles:
2851 for node in main.nodes:
2852 dstName = main.logdir + "/" + node.name + "-" + f
2853 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2854 logFolder + f, dstName )
2855 # std*.log's
2856 # NOTE: must end in /
2857 logFolder = "/opt/onos/var/"
2858 logFiles = [ "stderr.log", "stdout.log" ]
2859 # NOTE: must end in /
2860 for f in logFiles:
2861 for node in main.nodes:
2862 dstName = main.logdir + "/" + node.name + "-" + f
2863 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2864 logFolder + f, dstName )
2865 else:
2866 main.log.debug( "skipping saving log files" )
2867
2868 main.step( "Stopping Mininet" )
2869 mnResult = main.Mininet1.stopNet()
2870 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2871 onpass="Mininet stopped",
2872 onfail="MN cleanup NOT successful" )
2873
2874 main.step( "Checking ONOS Logs for errors" )
2875 for node in main.nodes:
2876 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2877 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2878
2879 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07002880 timerLog = open( main.logdir + "/Timers.csv", 'w' )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002881 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2882 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2883 timerLog.close()
Jon Hallf37d44d2017-05-24 10:37:30 -07002884 except NameError as e:
2885 main.log.exception( e )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002886
2887 main.step( "Stopping webserver" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002888 status = main.Server.stop()
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002889 utilities.assert_equals( expect=main.TRUE, actual=status,
2890 onpass="Stop Server",
2891 onfail="Failled to stop SimpleHTTPServer" )
2892 del main.Server
2893
2894 def CASE14( self, main ):
2895 """
2896 start election app on all onos nodes
2897 """
2898 import time
2899 assert main.numCtrls, "main.numCtrls not defined"
2900 assert main, "main not defined"
2901 assert utilities.assert_equals, "utilities.assert_equals not defined"
2902 assert main.CLIs, "main.CLIs not defined"
2903 assert main.nodes, "main.nodes not defined"
2904
Jon Hallf37d44d2017-05-24 10:37:30 -07002905 main.case( "Start Leadership Election app" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002906 main.step( "Install leadership election app" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002907 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002908 appResult = onosCli.activateApp( "org.onosproject.election" )
2909 utilities.assert_equals(
2910 expect=main.TRUE,
2911 actual=appResult,
2912 onpass="Election app installed",
2913 onfail="Something went wrong with installing Leadership election" )
2914
2915 main.step( "Run for election on each node" )
2916 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002917 main.CLIs[ i ].electionTestRun()
2918 time.sleep( 5 )
2919 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002920 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2921 utilities.assert_equals(
2922 expect=True,
2923 actual=sameResult,
2924 onpass="All nodes see the same leaderboards",
2925 onfail="Inconsistent leaderboards" )
2926
2927 if sameResult:
2928 leader = leaders[ 0 ][ 0 ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002929 if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002930 correctLeader = True
2931 else:
2932 correctLeader = False
2933 main.step( "First node was elected leader" )
2934 utilities.assert_equals(
2935 expect=True,
2936 actual=correctLeader,
2937 onpass="Correct leader was elected",
2938 onfail="Incorrect leader" )
2939
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election
        """
        import time
        # Verify the test environment was fully initialized by earlier cases
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            # withdrawing the only node leaves the topic leaderless
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Nothing else in this case can work without the election app
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # leaderboard entry 0 of board 0 is the current leader's IP
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: runs only if the loop never broke
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # leaderboard too short to predict the successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3113
3114 def CASE16( self, main ):
3115 """
3116 Install Distributed Primitives app
3117 """
3118 import time
3119 assert main.numCtrls, "main.numCtrls not defined"
3120 assert main, "main not defined"
3121 assert utilities.assert_equals, "utilities.assert_equals not defined"
3122 assert main.CLIs, "main.CLIs not defined"
3123 assert main.nodes, "main.nodes not defined"
3124
3125 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003126 main.pCounterName = "TestON-Partitions"
3127 main.pCounterValue = 0
Jon Hallf37d44d2017-05-24 10:37:30 -07003128 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003129 main.onosSetName = "TestON-set"
Jon Hall9ebd1bd2016-04-19 01:37:17 -07003130
3131 description = "Install Primitives app"
3132 main.case( description )
3133 main.step( "Install Primitives app" )
3134 appName = "org.onosproject.distributedprimitives"
Jon Hallf37d44d2017-05-24 10:37:30 -07003135 node = main.activeNodes[ 0 ]
3136 appResults = main.CLIs[ node ].activateApp( appName )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07003137 utilities.assert_equals( expect=main.TRUE,
3138 actual=appResults,
3139 onpass="Primitives app activated",
3140 onfail="Primitives app not activated" )
3141 time.sleep( 5 ) # To allow all nodes to activate
3142
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives
        """
        # Delegates to the shared HA helper's implementation of this case
        main.HA.CASE17( main )