blob: 2bfa27234f8284cda88723751df7e4c7eb250d07 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
             dynamic swapping of cluster nodes.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: Swap nodes
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
Jon Hall69b2b982016-05-11 12:04:59 -070025class HAswapNodes:
26
27 def __init__( self ):
28 self.default = ''
29
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines.

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump

        Also starts a SimpleHTTPServer serving a generated cluster metadata
        file and rewrites onos-service so each node fetches its cluster
        metadata from that server.
        """
        import time
        import os
        import re
        main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        # NOTE(review): missing space between "ONOS" and "cli" in the
        #               concatenated explanation string; left as-is since it
        #               is a runtime string.
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Clamp the requested cluster size to the number of nodes the bench
        # actually has available
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # Import test-suite helpers; abort the whole test if they are missing
        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
            from tests.HA.HAswapNodes.dependencies.Server import Server
            main.Server = Server()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect CLI/node driver handles named ONOScli1..N / ONOS1..N;
        # stop at the first missing component
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )

        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        # NOTE(review): "successfull"/"Failled" typos are in runtime strings
        #               and deliberately preserved here
        utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                 onpass="Set cell successfull",
                                 onfail="Failled to set cell" )

        main.step( "Verify connectivity to cell" )
        verifyResult = main.ONOSbench.verifyCell()
        utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
                                 onpass="Verify cell passed",
                                 onfail="Failled to verify cell" )

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        # Serve the dependencies dir over HTTP so nodes can fetch the
        # generated cluster metadata file
        main.step( "Setup server for cluster metadata file" )
        port = main.params[ 'server' ][ 'port' ]
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=port,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )

        main.step( "Generate initial metadata file" )
        # Hold two nodes back from the initial cluster; presumably they are
        # the spares swapped in later by CASE6 -- TODO confirm
        if main.numCtrls >= 5:
            main.numCtrls -= 2
        else:
            main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
        genResult = main.Server.generateFile( main.numCtrls )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAswapNodes"
        plotName = "Plot-HA"
        index = "2"
        # Confluence wiki markup embedding the Jenkins plot iframe
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        # Keep a pristine copy of onos-service before editing it below
        main.step( "Copying backup config files" )
        path = "~/onos/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 path,
                                 path + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Slashes are escaped because this value is substituted into a sed
        # s/// expression below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject an "export JAVA_OPTS=..." line right after the bash shebang
        # of onos-service so every node points at the metadata server
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # Consume the echoed command (it contains metaFile), then the prompt
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )
        if not packageResult:
            main.cleanup()
            main.exit()

        # Install on ALL bench nodes, but only start the first numCtrls
        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for i in range( main.ONOSbench.maxNodes ):
            node = main.nodes[ i ]
            options = "-f"
            if i >= main.numCtrls:
                options = "-nf"  # Don't start more than the current scale
            tmpResult = main.ONOSbench.onosInstall( options=options,
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            path + ".backup",
                            path,
                            direction="to" )

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for i in range( main.numCtrls ):
            node = main.nodes[ i ]
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Checking if ONOS is up yet" )
        # Outer loop retries the whole check once if any node is down.
        # NOTE(review): the inner loop reuses 'i', shadowing the retry
        #               counter; harmless since the outer 'i' is unused, but
        #               a rename would be clearer.
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for i in range( main.numCtrls ):
                node = main.nodes[ i ]
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        # Start one CLI per active node in parallel, then join and AND the
        # per-thread results together
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[ i ].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[ main.nodes[ i ].ip_address ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        # If any node is unhealthy, dump the inactive components and abort
        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        # Cross-check app name <-> id mapping on every active node in parallel
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
402
403 def CASE2( self, main ):
404 """
405 Assign devices to controllers
406 """
407 import re
408 assert main.numCtrls, "main.numCtrls not defined"
409 assert main, "main not defined"
410 assert utilities.assert_equals, "utilities.assert_equals not defined"
411 assert main.CLIs, "main.CLIs not defined"
412 assert main.nodes, "main.nodes not defined"
413
414 main.case( "Assigning devices to controllers" )
415 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
416 "and check that an ONOS node becomes the " +\
417 "master of the device."
418 main.step( "Assign switches to controllers" )
419
420 ipList = []
421 for i in range( main.ONOSbench.maxNodes ):
422 ipList.append( main.nodes[ i ].ip_address )
423 swList = []
424 for i in range( 1, 29 ):
425 swList.append( "s" + str( i ) )
426 main.Mininet1.assignSwController( sw=swList, ip=ipList )
427
428 mastershipCheck = main.TRUE
429 for i in range( 1, 29 ):
430 response = main.Mininet1.getSwController( "s" + str( i ) )
431 try:
432 main.log.info( str( response ) )
433 except Exception:
434 main.log.info( repr( response ) )
435 for node in main.nodes:
436 if re.search( "tcp:" + node.ip_address, response ):
437 mastershipCheck = mastershipCheck and main.TRUE
438 else:
439 main.log.error( "Error, node " + node.ip_address + " is " +
440 "not in the list of controllers s" +
441 str( i ) + " is connecting to." )
442 mastershipCheck = main.FALSE
443 utilities.assert_equals(
444 expect=main.TRUE,
445 actual=mastershipCheck,
446 onpass="Switch mastership assigned correctly",
447 onfail="Switches not assigned correctly to controllers" )
448
    def CASE21( self, main ):
        """
        Assign mastership to controllers.

        Manually maps each of the 28 switches to a predetermined ONOS node
        via 'device-role', then verifies the assignment took effect.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (controller index is taken modulo main.numCtrls)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # NOTE: getDevice() may return None, in which case the
                #       .get( 'id' ) raises AttributeError, caught below
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # NOTE(review): unreachable for i in 1..28; if it ever
                    #               ran, deviceId/ip would be stale values
                    #               from the previous iteration
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # A missing device aborts the remaining assignments; dump the
            # device view for debugging
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
563
564 def CASE3( self, main ):
565 """
566 Assign intents
567 """
568 import time
569 import json
570 assert main.numCtrls, "main.numCtrls not defined"
571 assert main, "main not defined"
572 assert utilities.assert_equals, "utilities.assert_equals not defined"
573 assert main.CLIs, "main.CLIs not defined"
574 assert main.nodes, "main.nodes not defined"
575 try:
576 labels
577 except NameError:
578 main.log.error( "labels not defined, setting to []" )
579 labels = []
580 try:
581 data
582 except NameError:
583 main.log.error( "data not defined, setting to []" )
584 data = []
585 # NOTE: we must reinstall intents until we have a persistant intent
586 # datastore!
587 main.case( "Adding host Intents" )
588 main.caseExplanation = "Discover hosts by using pingall then " +\
589 "assign predetermined host-to-host intents." +\
590 " After installation, check that the intent" +\
591 " is distributed to all nodes and the state" +\
592 " is INSTALLED"
593
594 # install onos-app-fwd
595 main.step( "Install reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700596 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -0700597 installResults = onosCli.activateApp( "org.onosproject.fwd" )
598 utilities.assert_equals( expect=main.TRUE, actual=installResults,
599 onpass="Install fwd successful",
600 onfail="Install fwd failed" )
601
602 main.step( "Check app ids" )
603 appCheck = main.TRUE
604 threads = []
605 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700606 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall69b2b982016-05-11 12:04:59 -0700607 name="appToIDCheck-" + str( i ),
608 args=[] )
609 threads.append( t )
610 t.start()
611
612 for t in threads:
613 t.join()
614 appCheck = appCheck and t.result
615 if appCheck != main.TRUE:
616 main.log.warn( onosCli.apps() )
617 main.log.warn( onosCli.appIDs() )
618 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
619 onpass="App Ids seem to be correct",
620 onfail="Something is wrong with app Ids" )
621
622 main.step( "Discovering Hosts( Via pingall for now )" )
623 # FIXME: Once we have a host discovery mechanism, use that instead
624 # REACTIVE FWD test
625 pingResult = main.FALSE
626 passMsg = "Reactive Pingall test passed"
627 time1 = time.time()
628 pingResult = main.Mininet1.pingall()
629 time2 = time.time()
630 if not pingResult:
Jon Hallf37d44d2017-05-24 10:37:30 -0700631 main.log.warn( "First pingall failed. Trying again..." )
Jon Hall69b2b982016-05-11 12:04:59 -0700632 pingResult = main.Mininet1.pingall()
633 passMsg += " on the second try"
634 utilities.assert_equals(
635 expect=main.TRUE,
636 actual=pingResult,
Jon Hallf37d44d2017-05-24 10:37:30 -0700637 onpass=passMsg,
Jon Hall69b2b982016-05-11 12:04:59 -0700638 onfail="Reactive Pingall failed, " +
639 "one or more ping pairs failed" )
640 main.log.info( "Time for pingall: %2f seconds" %
641 ( time2 - time1 ) )
642 # timeout for fwd flows
643 time.sleep( 11 )
644 # uninstall onos-app-fwd
645 main.step( "Uninstall reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700646 node = main.activeNodes[ 0 ]
647 uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
Jon Hall69b2b982016-05-11 12:04:59 -0700648 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
649 onpass="Uninstall fwd successful",
650 onfail="Uninstall fwd failed" )
651
652 main.step( "Check app ids" )
653 threads = []
654 appCheck2 = main.TRUE
655 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700656 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall69b2b982016-05-11 12:04:59 -0700657 name="appToIDCheck-" + str( i ),
658 args=[] )
659 threads.append( t )
660 t.start()
661
662 for t in threads:
663 t.join()
664 appCheck2 = appCheck2 and t.result
665 if appCheck2 != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700666 node = main.activeNodes[ 0 ]
667 main.log.warn( main.CLIs[ node ].apps() )
668 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall69b2b982016-05-11 12:04:59 -0700669 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
670 onpass="App Ids seem to be correct",
671 onfail="Something is wrong with app Ids" )
672
673 main.step( "Add host intents via cli" )
674 intentIds = []
675 # TODO: move the host numbers to params
676 # Maybe look at all the paths we ping?
677 intentAddResult = True
678 hostResult = main.TRUE
679 for i in range( 8, 18 ):
680 main.log.info( "Adding host intent between h" + str( i ) +
681 " and h" + str( i + 10 ) )
682 host1 = "00:00:00:00:00:" + \
683 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
684 host2 = "00:00:00:00:00:" + \
685 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
686 # NOTE: getHost can return None
687 host1Dict = onosCli.getHost( host1 )
688 host2Dict = onosCli.getHost( host2 )
689 host1Id = None
690 host2Id = None
691 if host1Dict and host2Dict:
692 host1Id = host1Dict.get( 'id', None )
693 host2Id = host2Dict.get( 'id', None )
694 if host1Id and host2Id:
695 nodeNum = ( i % len( main.activeNodes ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700696 node = main.activeNodes[ nodeNum ]
697 tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
Jon Hall69b2b982016-05-11 12:04:59 -0700698 if tmpId:
699 main.log.info( "Added intent with id: " + tmpId )
700 intentIds.append( tmpId )
701 else:
702 main.log.error( "addHostIntent returned: " +
703 repr( tmpId ) )
704 else:
705 main.log.error( "Error, getHost() failed for h" + str( i ) +
706 " and/or h" + str( i + 10 ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700707 node = main.activeNodes[ 0 ]
708 hosts = main.CLIs[ node ].hosts()
Jon Hall69b2b982016-05-11 12:04:59 -0700709 main.log.warn( "Hosts output: " )
710 try:
711 main.log.warn( json.dumps( json.loads( hosts ),
712 sort_keys=True,
713 indent=4,
714 separators=( ',', ': ' ) ) )
715 except ( ValueError, TypeError ):
716 main.log.warn( repr( hosts ) )
717 hostResult = main.FALSE
718 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
719 onpass="Found a host id for each host",
720 onfail="Error looking up host ids" )
721
722 intentStart = time.time()
723 onosIds = onosCli.getAllIntentsId()
724 main.log.info( "Submitted intents: " + str( intentIds ) )
725 main.log.info( "Intents in ONOS: " + str( onosIds ) )
726 for intent in intentIds:
727 if intent in onosIds:
728 pass # intent submitted is in onos
729 else:
730 intentAddResult = False
731 if intentAddResult:
732 intentStop = time.time()
733 else:
734 intentStop = None
735 # Print the intent states
736 intents = onosCli.intents()
737 intentStates = []
738 installedCheck = True
739 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
740 count = 0
741 try:
742 for intent in json.loads( intents ):
743 state = intent.get( 'state', None )
744 if "INSTALLED" not in state:
745 installedCheck = False
746 intentId = intent.get( 'id', None )
747 intentStates.append( ( intentId, state ) )
748 except ( ValueError, TypeError ):
749 main.log.exception( "Error parsing intents" )
750 # add submitted intents not in the store
751 tmplist = [ i for i, s in intentStates ]
752 missingIntents = False
753 for i in intentIds:
754 if i not in tmplist:
755 intentStates.append( ( i, " - " ) )
756 missingIntents = True
757 intentStates.sort()
758 for i, s in intentStates:
759 count += 1
760 main.log.info( "%-6s%-15s%-15s" %
761 ( str( count ), str( i ), str( s ) ) )
762 leaders = onosCli.leaders()
763 try:
764 missing = False
765 if leaders:
766 parsedLeaders = json.loads( leaders )
767 main.log.warn( json.dumps( parsedLeaders,
768 sort_keys=True,
769 indent=4,
770 separators=( ',', ': ' ) ) )
771 # check for all intent partitions
772 topics = []
773 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700774 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700775 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700776 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -0700777 for topic in topics:
778 if topic not in ONOStopics:
779 main.log.error( "Error: " + topic +
780 " not in leaders" )
781 missing = True
782 else:
783 main.log.error( "leaders() returned None" )
784 except ( ValueError, TypeError ):
785 main.log.exception( "Error parsing leaders" )
786 main.log.error( repr( leaders ) )
787 # Check all nodes
788 if missing:
789 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700790 response = main.CLIs[ i ].leaders( jsonFormat=False )
791 main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
Jon Hall69b2b982016-05-11 12:04:59 -0700792 str( response ) )
793
794 partitions = onosCli.partitions()
795 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700796 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -0700797 parsedPartitions = json.loads( partitions )
798 main.log.warn( json.dumps( parsedPartitions,
799 sort_keys=True,
800 indent=4,
801 separators=( ',', ': ' ) ) )
802 # TODO check for a leader in all paritions
803 # TODO check for consistency among nodes
804 else:
805 main.log.error( "partitions() returned None" )
806 except ( ValueError, TypeError ):
807 main.log.exception( "Error parsing partitions" )
808 main.log.error( repr( partitions ) )
809 pendingMap = onosCli.pendingMap()
810 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700811 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -0700812 parsedPending = json.loads( pendingMap )
813 main.log.warn( json.dumps( parsedPending,
814 sort_keys=True,
815 indent=4,
816 separators=( ',', ': ' ) ) )
817 # TODO check something here?
818 else:
819 main.log.error( "pendingMap() returned None" )
820 except ( ValueError, TypeError ):
821 main.log.exception( "Error parsing pending map" )
822 main.log.error( repr( pendingMap ) )
823
824 intentAddResult = bool( intentAddResult and not missingIntents and
825 installedCheck )
826 if not intentAddResult:
827 main.log.error( "Error in pushing host intents to ONOS" )
828
829 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700830 for j in range( 100 ):
Jon Hall69b2b982016-05-11 12:04:59 -0700831 correct = True
832 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
833 for i in main.activeNodes:
834 onosIds = []
Jon Hallf37d44d2017-05-24 10:37:30 -0700835 ids = main.CLIs[ i ].getAllIntentsId()
Jon Hall69b2b982016-05-11 12:04:59 -0700836 onosIds.append( ids )
Jon Hallf37d44d2017-05-24 10:37:30 -0700837 main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
Jon Hall69b2b982016-05-11 12:04:59 -0700838 str( sorted( onosIds ) ) )
839 if sorted( ids ) != sorted( intentIds ):
840 main.log.warn( "Set of intent IDs doesn't match" )
841 correct = False
842 break
843 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700844 intents = json.loads( main.CLIs[ i ].intents() )
Jon Hall69b2b982016-05-11 12:04:59 -0700845 for intent in intents:
846 if intent[ 'state' ] != "INSTALLED":
847 main.log.warn( "Intent " + intent[ 'id' ] +
848 " is " + intent[ 'state' ] )
849 correct = False
850 break
851 if correct:
852 break
853 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700854 time.sleep( 1 )
Jon Hall69b2b982016-05-11 12:04:59 -0700855 if not intentStop:
856 intentStop = time.time()
857 global gossipTime
858 gossipTime = intentStop - intentStart
859 main.log.info( "It took about " + str( gossipTime ) +
860 " seconds for all intents to appear in each node" )
861 append = False
862 title = "Gossip Intents"
863 count = 1
864 while append is False:
865 curTitle = title + str( count )
866 if curTitle not in labels:
867 labels.append( curTitle )
868 data.append( str( gossipTime ) )
869 append = True
870 else:
871 count += 1
Jon Hallf37d44d2017-05-24 10:37:30 -0700872 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Jon Hall69b2b982016-05-11 12:04:59 -0700873 maxGossipTime = gossipPeriod * len( main.activeNodes )
874 utilities.assert_greater_equals(
875 expect=maxGossipTime, actual=gossipTime,
876 onpass="ECM anti-entropy for intents worked within " +
877 "expected time",
878 onfail="Intent ECM anti-entropy took too long. " +
879 "Expected time:{}, Actual time:{}".format( maxGossipTime,
880 gossipTime ) )
881 if gossipTime <= maxGossipTime:
882 intentAddResult = True
883
884 if not intentAddResult or "key" in pendingMap:
885 import time
886 installedCheck = True
887 main.log.info( "Sleeping 60 seconds to see if intents are found" )
888 time.sleep( 60 )
889 onosIds = onosCli.getAllIntentsId()
890 main.log.info( "Submitted intents: " + str( intentIds ) )
891 main.log.info( "Intents in ONOS: " + str( onosIds ) )
892 # Print the intent states
893 intents = onosCli.intents()
894 intentStates = []
895 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
896 count = 0
897 try:
898 for intent in json.loads( intents ):
899 # Iter through intents of a node
900 state = intent.get( 'state', None )
901 if "INSTALLED" not in state:
902 installedCheck = False
903 intentId = intent.get( 'id', None )
904 intentStates.append( ( intentId, state ) )
905 except ( ValueError, TypeError ):
906 main.log.exception( "Error parsing intents" )
907 # add submitted intents not in the store
908 tmplist = [ i for i, s in intentStates ]
909 for i in intentIds:
910 if i not in tmplist:
911 intentStates.append( ( i, " - " ) )
912 intentStates.sort()
913 for i, s in intentStates:
914 count += 1
915 main.log.info( "%-6s%-15s%-15s" %
916 ( str( count ), str( i ), str( s ) ) )
917 leaders = onosCli.leaders()
918 try:
919 missing = False
920 if leaders:
921 parsedLeaders = json.loads( leaders )
922 main.log.warn( json.dumps( parsedLeaders,
923 sort_keys=True,
924 indent=4,
925 separators=( ',', ': ' ) ) )
926 # check for all intent partitions
927 # check for election
928 topics = []
929 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700930 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700931 # FIXME: this should only be after we start the app
932 topics.append( "org.onosproject.election" )
933 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700934 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -0700935 for topic in topics:
936 if topic not in ONOStopics:
937 main.log.error( "Error: " + topic +
938 " not in leaders" )
939 missing = True
940 else:
941 main.log.error( "leaders() returned None" )
942 except ( ValueError, TypeError ):
943 main.log.exception( "Error parsing leaders" )
944 main.log.error( repr( leaders ) )
945 # Check all nodes
946 if missing:
947 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700948 node = main.CLIs[ i ]
949 response = node.leaders( jsonFormat=False )
Jon Hall69b2b982016-05-11 12:04:59 -0700950 main.log.warn( str( node.name ) + " leaders output: \n" +
951 str( response ) )
952
953 partitions = onosCli.partitions()
954 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700955 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -0700956 parsedPartitions = json.loads( partitions )
957 main.log.warn( json.dumps( parsedPartitions,
958 sort_keys=True,
959 indent=4,
960 separators=( ',', ': ' ) ) )
961 # TODO check for a leader in all paritions
962 # TODO check for consistency among nodes
963 else:
964 main.log.error( "partitions() returned None" )
965 except ( ValueError, TypeError ):
966 main.log.exception( "Error parsing partitions" )
967 main.log.error( repr( partitions ) )
968 pendingMap = onosCli.pendingMap()
969 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700970 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -0700971 parsedPending = json.loads( pendingMap )
972 main.log.warn( json.dumps( parsedPending,
973 sort_keys=True,
974 indent=4,
975 separators=( ',', ': ' ) ) )
976 # TODO check something here?
977 else:
978 main.log.error( "pendingMap() returned None" )
979 except ( ValueError, TypeError ):
980 main.log.exception( "Error parsing pending map" )
981 main.log.error( repr( pendingMap ) )
982
983 def CASE4( self, main ):
984 """
985 Ping across added host intents
986 """
987 import json
988 import time
989 assert main.numCtrls, "main.numCtrls not defined"
990 assert main, "main not defined"
991 assert utilities.assert_equals, "utilities.assert_equals not defined"
992 assert main.CLIs, "main.CLIs not defined"
993 assert main.nodes, "main.nodes not defined"
994 main.case( "Verify connectivity by sending traffic across Intents" )
995 main.caseExplanation = "Ping across added host intents to check " +\
996 "functionality and check the state of " +\
997 "the intent"
998
Jon Hallf37d44d2017-05-24 10:37:30 -0700999 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -07001000 main.step( "Check Intent state" )
1001 installedCheck = False
1002 loopCount = 0
1003 while not installedCheck and loopCount < 40:
1004 installedCheck = True
1005 # Print the intent states
1006 intents = onosCli.intents()
1007 intentStates = []
1008 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1009 count = 0
1010 # Iter through intents of a node
1011 try:
1012 for intent in json.loads( intents ):
1013 state = intent.get( 'state', None )
1014 if "INSTALLED" not in state:
1015 installedCheck = False
1016 intentId = intent.get( 'id', None )
1017 intentStates.append( ( intentId, state ) )
1018 except ( ValueError, TypeError ):
1019 main.log.exception( "Error parsing intents." )
1020 # Print states
1021 intentStates.sort()
1022 for i, s in intentStates:
1023 count += 1
1024 main.log.info( "%-6s%-15s%-15s" %
1025 ( str( count ), str( i ), str( s ) ) )
1026 if not installedCheck:
1027 time.sleep( 1 )
1028 loopCount += 1
1029 utilities.assert_equals( expect=True, actual=installedCheck,
1030 onpass="Intents are all INSTALLED",
1031 onfail="Intents are not all in " +
1032 "INSTALLED state" )
1033
1034 main.step( "Ping across added host intents" )
1035 PingResult = main.TRUE
1036 for i in range( 8, 18 ):
1037 ping = main.Mininet1.pingHost( src="h" + str( i ),
1038 target="h" + str( i + 10 ) )
1039 PingResult = PingResult and ping
1040 if ping == main.FALSE:
1041 main.log.warn( "Ping failed between h" + str( i ) +
1042 " and h" + str( i + 10 ) )
1043 elif ping == main.TRUE:
1044 main.log.info( "Ping test passed!" )
1045 # Don't set PingResult or you'd override failures
1046 if PingResult == main.FALSE:
1047 main.log.error(
1048 "Intents have not been installed correctly, pings failed." )
1049 # TODO: pretty print
1050 main.log.warn( "ONOS1 intents: " )
1051 try:
1052 tmpIntents = onosCli.intents()
1053 main.log.warn( json.dumps( json.loads( tmpIntents ),
1054 sort_keys=True,
1055 indent=4,
1056 separators=( ',', ': ' ) ) )
1057 except ( ValueError, TypeError ):
1058 main.log.warn( repr( tmpIntents ) )
1059 utilities.assert_equals(
1060 expect=main.TRUE,
1061 actual=PingResult,
1062 onpass="Intents have been installed correctly and pings work",
1063 onfail="Intents have not been installed correctly, pings failed." )
1064
1065 main.step( "Check leadership of topics" )
1066 leaders = onosCli.leaders()
1067 topicCheck = main.TRUE
1068 try:
1069 if leaders:
1070 parsedLeaders = json.loads( leaders )
1071 main.log.warn( json.dumps( parsedLeaders,
1072 sort_keys=True,
1073 indent=4,
1074 separators=( ',', ': ' ) ) )
1075 # check for all intent partitions
1076 # check for election
1077 # TODO: Look at Devices as topics now that it uses this system
1078 topics = []
1079 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001080 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001081 # FIXME: this should only be after we start the app
1082 # FIXME: topics.append( "org.onosproject.election" )
1083 # Print leaders output
1084 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001085 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -07001086 for topic in topics:
1087 if topic not in ONOStopics:
1088 main.log.error( "Error: " + topic +
1089 " not in leaders" )
1090 topicCheck = main.FALSE
1091 else:
1092 main.log.error( "leaders() returned None" )
1093 topicCheck = main.FALSE
1094 except ( ValueError, TypeError ):
1095 topicCheck = main.FALSE
1096 main.log.exception( "Error parsing leaders" )
1097 main.log.error( repr( leaders ) )
1098 # TODO: Check for a leader of these topics
1099 # Check all nodes
1100 if topicCheck:
1101 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001102 node = main.CLIs[ i ]
1103 response = node.leaders( jsonFormat=False )
Jon Hall69b2b982016-05-11 12:04:59 -07001104 main.log.warn( str( node.name ) + " leaders output: \n" +
1105 str( response ) )
1106
1107 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1108 onpass="intent Partitions is in leaders",
1109 onfail="Some topics were lost " )
1110 # Print partitions
1111 partitions = onosCli.partitions()
1112 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001113 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -07001114 parsedPartitions = json.loads( partitions )
1115 main.log.warn( json.dumps( parsedPartitions,
1116 sort_keys=True,
1117 indent=4,
1118 separators=( ',', ': ' ) ) )
1119 # TODO check for a leader in all paritions
1120 # TODO check for consistency among nodes
1121 else:
1122 main.log.error( "partitions() returned None" )
1123 except ( ValueError, TypeError ):
1124 main.log.exception( "Error parsing partitions" )
1125 main.log.error( repr( partitions ) )
1126 # Print Pending Map
1127 pendingMap = onosCli.pendingMap()
1128 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001129 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -07001130 parsedPending = json.loads( pendingMap )
1131 main.log.warn( json.dumps( parsedPending,
1132 sort_keys=True,
1133 indent=4,
1134 separators=( ',', ': ' ) ) )
1135 # TODO check something here?
1136 else:
1137 main.log.error( "pendingMap() returned None" )
1138 except ( ValueError, TypeError ):
1139 main.log.exception( "Error parsing pending map" )
1140 main.log.error( repr( pendingMap ) )
1141
1142 if not installedCheck:
1143 main.log.info( "Waiting 60 seconds to see if the state of " +
1144 "intents change" )
1145 time.sleep( 60 )
1146 # Print the intent states
1147 intents = onosCli.intents()
1148 intentStates = []
1149 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1150 count = 0
1151 # Iter through intents of a node
1152 try:
1153 for intent in json.loads( intents ):
1154 state = intent.get( 'state', None )
1155 if "INSTALLED" not in state:
1156 installedCheck = False
1157 intentId = intent.get( 'id', None )
1158 intentStates.append( ( intentId, state ) )
1159 except ( ValueError, TypeError ):
1160 main.log.exception( "Error parsing intents." )
1161 intentStates.sort()
1162 for i, s in intentStates:
1163 count += 1
1164 main.log.info( "%-6s%-15s%-15s" %
1165 ( str( count ), str( i ), str( s ) ) )
1166 leaders = onosCli.leaders()
1167 try:
1168 missing = False
1169 if leaders:
1170 parsedLeaders = json.loads( leaders )
1171 main.log.warn( json.dumps( parsedLeaders,
1172 sort_keys=True,
1173 indent=4,
1174 separators=( ',', ': ' ) ) )
1175 # check for all intent partitions
1176 # check for election
1177 topics = []
1178 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001179 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001180 # FIXME: this should only be after we start the app
1181 topics.append( "org.onosproject.election" )
1182 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001183 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -07001184 for topic in topics:
1185 if topic not in ONOStopics:
1186 main.log.error( "Error: " + topic +
1187 " not in leaders" )
1188 missing = True
1189 else:
1190 main.log.error( "leaders() returned None" )
1191 except ( ValueError, TypeError ):
1192 main.log.exception( "Error parsing leaders" )
1193 main.log.error( repr( leaders ) )
1194 if missing:
1195 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001196 node = main.CLIs[ i ]
1197 response = node.leaders( jsonFormat=False )
Jon Hall69b2b982016-05-11 12:04:59 -07001198 main.log.warn( str( node.name ) + " leaders output: \n" +
1199 str( response ) )
1200
1201 partitions = onosCli.partitions()
1202 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001203 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -07001204 parsedPartitions = json.loads( partitions )
1205 main.log.warn( json.dumps( parsedPartitions,
1206 sort_keys=True,
1207 indent=4,
1208 separators=( ',', ': ' ) ) )
1209 # TODO check for a leader in all paritions
1210 # TODO check for consistency among nodes
1211 else:
1212 main.log.error( "partitions() returned None" )
1213 except ( ValueError, TypeError ):
1214 main.log.exception( "Error parsing partitions" )
1215 main.log.error( repr( partitions ) )
1216 pendingMap = onosCli.pendingMap()
1217 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001218 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -07001219 parsedPending = json.loads( pendingMap )
1220 main.log.warn( json.dumps( parsedPending,
1221 sort_keys=True,
1222 indent=4,
1223 separators=( ',', ': ' ) ) )
1224 # TODO check something here?
1225 else:
1226 main.log.error( "pendingMap() returned None" )
1227 except ( ValueError, TypeError ):
1228 main.log.exception( "Error parsing pending map" )
1229 main.log.error( repr( pendingMap ) )
1230 # Print flowrules
Jon Hallf37d44d2017-05-24 10:37:30 -07001231 node = main.activeNodes[ 0 ]
1232 main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001233 main.step( "Wait a minute then ping again" )
1234 # the wait is above
1235 PingResult = main.TRUE
1236 for i in range( 8, 18 ):
1237 ping = main.Mininet1.pingHost( src="h" + str( i ),
1238 target="h" + str( i + 10 ) )
1239 PingResult = PingResult and ping
1240 if ping == main.FALSE:
1241 main.log.warn( "Ping failed between h" + str( i ) +
1242 " and h" + str( i + 10 ) )
1243 elif ping == main.TRUE:
1244 main.log.info( "Ping test passed!" )
1245 # Don't set PingResult or you'd override failures
1246 if PingResult == main.FALSE:
1247 main.log.error(
1248 "Intents have not been installed correctly, pings failed." )
1249 # TODO: pretty print
1250 main.log.warn( "ONOS1 intents: " )
1251 try:
1252 tmpIntents = onosCli.intents()
1253 main.log.warn( json.dumps( json.loads( tmpIntents ),
1254 sort_keys=True,
1255 indent=4,
1256 separators=( ',', ': ' ) ) )
1257 except ( ValueError, TypeError ):
1258 main.log.warn( repr( tmpIntents ) )
1259 utilities.assert_equals(
1260 expect=main.TRUE,
1261 actual=PingResult,
1262 onpass="Intents have been installed correctly and pings work",
1263 onfail="Intents have not been installed correctly, pings failed." )
1264
1265 def CASE5( self, main ):
1266 """
1267 Reading state of ONOS
1268 """
1269 import json
1270 import time
1271 assert main.numCtrls, "main.numCtrls not defined"
1272 assert main, "main not defined"
1273 assert utilities.assert_equals, "utilities.assert_equals not defined"
1274 assert main.CLIs, "main.CLIs not defined"
1275 assert main.nodes, "main.nodes not defined"
1276
1277 main.case( "Setting up and gathering data for current state" )
1278 # The general idea for this test case is to pull the state of
1279 # ( intents,flows, topology,... ) from each ONOS node
1280 # We can then compare them with each other and also with past states
1281
1282 main.step( "Check that each switch has a master" )
1283 global mastershipState
1284 mastershipState = '[]'
1285
1286 # Assert that each device has a master
1287 rolesNotNull = main.TRUE
1288 threads = []
1289 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001290 t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
Jon Hall69b2b982016-05-11 12:04:59 -07001291 name="rolesNotNull-" + str( i ),
1292 args=[] )
1293 threads.append( t )
1294 t.start()
1295
1296 for t in threads:
1297 t.join()
1298 rolesNotNull = rolesNotNull and t.result
1299 utilities.assert_equals(
1300 expect=main.TRUE,
1301 actual=rolesNotNull,
1302 onpass="Each device has a master",
1303 onfail="Some devices don't have a master assigned" )
1304
1305 main.step( "Get the Mastership of each switch from each controller" )
1306 ONOSMastership = []
1307 consistentMastership = True
1308 rolesResults = True
1309 threads = []
1310 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001311 t = main.Thread( target=main.CLIs[ i ].roles,
Jon Hall69b2b982016-05-11 12:04:59 -07001312 name="roles-" + str( i ),
1313 args=[] )
1314 threads.append( t )
1315 t.start()
1316
1317 for t in threads:
1318 t.join()
1319 ONOSMastership.append( t.result )
1320
1321 for i in range( len( ONOSMastership ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001322 node = str( main.activeNodes[ i ] + 1 )
1323 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hall69b2b982016-05-11 12:04:59 -07001324 main.log.error( "Error in getting ONOS" + node + " roles" )
1325 main.log.warn( "ONOS" + node + " mastership response: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001326 repr( ONOSMastership[ i ] ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001327 rolesResults = False
1328 utilities.assert_equals(
1329 expect=True,
1330 actual=rolesResults,
1331 onpass="No error in reading roles output",
1332 onfail="Error in reading roles from ONOS" )
1333
1334 main.step( "Check for consistency in roles from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001335 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
Jon Hall69b2b982016-05-11 12:04:59 -07001336 main.log.info(
1337 "Switch roles are consistent across all ONOS nodes" )
1338 else:
1339 consistentMastership = False
1340 utilities.assert_equals(
1341 expect=True,
1342 actual=consistentMastership,
1343 onpass="Switch roles are consistent across all ONOS nodes",
1344 onfail="ONOS nodes have different views of switch roles" )
1345
1346 if rolesResults and not consistentMastership:
1347 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001348 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001349 try:
1350 main.log.warn(
1351 "ONOS" + node + " roles: ",
1352 json.dumps(
1353 json.loads( ONOSMastership[ i ] ),
1354 sort_keys=True,
1355 indent=4,
1356 separators=( ',', ': ' ) ) )
1357 except ( ValueError, TypeError ):
1358 main.log.warn( repr( ONOSMastership[ i ] ) )
1359 elif rolesResults and consistentMastership:
1360 mastershipState = ONOSMastership[ 0 ]
1361
1362 main.step( "Get the intents from each controller" )
1363 global intentState
1364 intentState = []
1365 ONOSIntents = []
1366 consistentIntents = True # Are Intents consistent across nodes?
1367 intentsResults = True # Could we read Intents from ONOS?
1368 threads = []
1369 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001370 t = main.Thread( target=main.CLIs[ i ].intents,
Jon Hall69b2b982016-05-11 12:04:59 -07001371 name="intents-" + str( i ),
1372 args=[],
1373 kwargs={ 'jsonFormat': True } )
1374 threads.append( t )
1375 t.start()
1376
1377 for t in threads:
1378 t.join()
1379 ONOSIntents.append( t.result )
1380
1381 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001382 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001383 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1384 main.log.error( "Error in getting ONOS" + node + " intents" )
1385 main.log.warn( "ONOS" + node + " intents response: " +
1386 repr( ONOSIntents[ i ] ) )
1387 intentsResults = False
1388 utilities.assert_equals(
1389 expect=True,
1390 actual=intentsResults,
1391 onpass="No error in reading intents output",
1392 onfail="Error in reading intents from ONOS" )
1393
1394 main.step( "Check for consistency in Intents from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001395 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
Jon Hall69b2b982016-05-11 12:04:59 -07001396 main.log.info( "Intents are consistent across all ONOS " +
1397 "nodes" )
1398 else:
1399 consistentIntents = False
1400 main.log.error( "Intents not consistent" )
1401 utilities.assert_equals(
1402 expect=True,
1403 actual=consistentIntents,
1404 onpass="Intents are consistent across all ONOS nodes",
1405 onfail="ONOS nodes have different views of intents" )
1406
1407 if intentsResults:
1408 # Try to make it easy to figure out what is happening
1409 #
1410 # Intent ONOS1 ONOS2 ...
1411 # 0x01 INSTALLED INSTALLING
1412 # ... ... ...
1413 # ... ... ...
1414 title = " Id"
1415 for n in main.activeNodes:
1416 title += " " * 10 + "ONOS" + str( n + 1 )
1417 main.log.warn( title )
1418 # get all intent keys in the cluster
1419 keys = []
1420 try:
1421 # Get the set of all intent keys
1422 for nodeStr in ONOSIntents:
1423 node = json.loads( nodeStr )
1424 for intent in node:
1425 keys.append( intent.get( 'id' ) )
1426 keys = set( keys )
1427 # For each intent key, print the state on each node
1428 for key in keys:
1429 row = "%-13s" % key
1430 for nodeStr in ONOSIntents:
1431 node = json.loads( nodeStr )
1432 for intent in node:
1433 if intent.get( 'id', "Error" ) == key:
1434 row += "%-15s" % intent.get( 'state' )
1435 main.log.warn( row )
1436 # End of intent state table
1437 except ValueError as e:
1438 main.log.exception( e )
1439 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1440
1441 if intentsResults and not consistentIntents:
1442 # print the json objects
Jon Hallf37d44d2017-05-24 10:37:30 -07001443 n = str( main.activeNodes[ -1 ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001444 main.log.debug( "ONOS" + n + " intents: " )
1445 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1446 sort_keys=True,
1447 indent=4,
1448 separators=( ',', ': ' ) ) )
1449 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001450 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001451 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1452 main.log.debug( "ONOS" + node + " intents: " )
Jon Hallf37d44d2017-05-24 10:37:30 -07001453 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
Jon Hall69b2b982016-05-11 12:04:59 -07001454 sort_keys=True,
1455 indent=4,
1456 separators=( ',', ': ' ) ) )
1457 else:
1458 main.log.debug( "ONOS" + node + " intents match ONOS" +
1459 n + " intents" )
1460 elif intentsResults and consistentIntents:
1461 intentState = ONOSIntents[ 0 ]
1462
1463 main.step( "Get the flows from each controller" )
1464 global flowState
1465 flowState = []
1466 ONOSFlows = []
1467 ONOSFlowsJson = []
1468 flowCheck = main.FALSE
1469 consistentFlows = True
1470 flowsResults = True
1471 threads = []
1472 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001473 t = main.Thread( target=main.CLIs[ i ].flows,
Jon Hall69b2b982016-05-11 12:04:59 -07001474 name="flows-" + str( i ),
1475 args=[],
1476 kwargs={ 'jsonFormat': True } )
1477 threads.append( t )
1478 t.start()
1479
1480 # NOTE: Flows command can take some time to run
Jon Hallf37d44d2017-05-24 10:37:30 -07001481 time.sleep( 30 )
Jon Hall69b2b982016-05-11 12:04:59 -07001482 for t in threads:
1483 t.join()
1484 result = t.result
1485 ONOSFlows.append( result )
1486
1487 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001488 num = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001489 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1490 main.log.error( "Error in getting ONOS" + num + " flows" )
1491 main.log.warn( "ONOS" + num + " flows response: " +
1492 repr( ONOSFlows[ i ] ) )
1493 flowsResults = False
1494 ONOSFlowsJson.append( None )
1495 else:
1496 try:
1497 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1498 except ( ValueError, TypeError ):
1499 # FIXME: change this to log.error?
1500 main.log.exception( "Error in parsing ONOS" + num +
1501 " response as json." )
1502 main.log.error( repr( ONOSFlows[ i ] ) )
1503 ONOSFlowsJson.append( None )
1504 flowsResults = False
1505 utilities.assert_equals(
1506 expect=True,
1507 actual=flowsResults,
1508 onpass="No error in reading flows output",
1509 onfail="Error in reading flows from ONOS" )
1510
1511 main.step( "Check for consistency in Flows from each controller" )
1512 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1513 if all( tmp ):
1514 main.log.info( "Flow count is consistent across all ONOS nodes" )
1515 else:
1516 consistentFlows = False
1517 utilities.assert_equals(
1518 expect=True,
1519 actual=consistentFlows,
1520 onpass="The flow count is consistent across all ONOS nodes",
1521 onfail="ONOS nodes have different flow counts" )
1522
1523 if flowsResults and not consistentFlows:
1524 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001525 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001526 try:
1527 main.log.warn(
1528 "ONOS" + node + " flows: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001529 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
Jon Hall69b2b982016-05-11 12:04:59 -07001530 indent=4, separators=( ',', ': ' ) ) )
1531 except ( ValueError, TypeError ):
1532 main.log.warn( "ONOS" + node + " flows: " +
1533 repr( ONOSFlows[ i ] ) )
1534 elif flowsResults and consistentFlows:
1535 flowCheck = main.TRUE
1536 flowState = ONOSFlows[ 0 ]
1537
1538 main.step( "Get the OF Table entries" )
1539 global flows
1540 flows = []
1541 for i in range( 1, 29 ):
1542 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1543 if flowCheck == main.FALSE:
1544 for table in flows:
1545 main.log.warn( table )
1546 # TODO: Compare switch flow tables with ONOS flow tables
1547
1548 main.step( "Start continuous pings" )
1549 main.Mininet2.pingLong(
1550 src=main.params[ 'PING' ][ 'source1' ],
1551 target=main.params[ 'PING' ][ 'target1' ],
1552 pingTime=500 )
1553 main.Mininet2.pingLong(
1554 src=main.params[ 'PING' ][ 'source2' ],
1555 target=main.params[ 'PING' ][ 'target2' ],
1556 pingTime=500 )
1557 main.Mininet2.pingLong(
1558 src=main.params[ 'PING' ][ 'source3' ],
1559 target=main.params[ 'PING' ][ 'target3' ],
1560 pingTime=500 )
1561 main.Mininet2.pingLong(
1562 src=main.params[ 'PING' ][ 'source4' ],
1563 target=main.params[ 'PING' ][ 'target4' ],
1564 pingTime=500 )
1565 main.Mininet2.pingLong(
1566 src=main.params[ 'PING' ][ 'source5' ],
1567 target=main.params[ 'PING' ][ 'target5' ],
1568 pingTime=500 )
1569 main.Mininet2.pingLong(
1570 src=main.params[ 'PING' ][ 'source6' ],
1571 target=main.params[ 'PING' ][ 'target6' ],
1572 pingTime=500 )
1573 main.Mininet2.pingLong(
1574 src=main.params[ 'PING' ][ 'source7' ],
1575 target=main.params[ 'PING' ][ 'target7' ],
1576 pingTime=500 )
1577 main.Mininet2.pingLong(
1578 src=main.params[ 'PING' ][ 'source8' ],
1579 target=main.params[ 'PING' ][ 'target8' ],
1580 pingTime=500 )
1581 main.Mininet2.pingLong(
1582 src=main.params[ 'PING' ][ 'source9' ],
1583 target=main.params[ 'PING' ][ 'target9' ],
1584 pingTime=500 )
1585 main.Mininet2.pingLong(
1586 src=main.params[ 'PING' ][ 'source10' ],
1587 target=main.params[ 'PING' ][ 'target10' ],
1588 pingTime=500 )
1589
1590 main.step( "Collecting topology information from ONOS" )
1591 devices = []
1592 threads = []
1593 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001594 t = main.Thread( target=main.CLIs[ i ].devices,
Jon Hall69b2b982016-05-11 12:04:59 -07001595 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001596 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001597 threads.append( t )
1598 t.start()
1599
1600 for t in threads:
1601 t.join()
1602 devices.append( t.result )
1603 hosts = []
1604 threads = []
1605 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001606 t = main.Thread( target=main.CLIs[ i ].hosts,
Jon Hall69b2b982016-05-11 12:04:59 -07001607 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001608 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001609 threads.append( t )
1610 t.start()
1611
1612 for t in threads:
1613 t.join()
1614 try:
1615 hosts.append( json.loads( t.result ) )
1616 except ( ValueError, TypeError ):
1617 # FIXME: better handling of this, print which node
1618 # Maybe use thread name?
1619 main.log.exception( "Error parsing json output of hosts" )
1620 main.log.warn( repr( t.result ) )
1621 hosts.append( None )
1622
1623 ports = []
1624 threads = []
1625 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001626 t = main.Thread( target=main.CLIs[ i ].ports,
Jon Hall69b2b982016-05-11 12:04:59 -07001627 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001628 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001629 threads.append( t )
1630 t.start()
1631
1632 for t in threads:
1633 t.join()
1634 ports.append( t.result )
1635 links = []
1636 threads = []
1637 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001638 t = main.Thread( target=main.CLIs[ i ].links,
Jon Hall69b2b982016-05-11 12:04:59 -07001639 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001640 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001641 threads.append( t )
1642 t.start()
1643
1644 for t in threads:
1645 t.join()
1646 links.append( t.result )
1647 clusters = []
1648 threads = []
1649 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001650 t = main.Thread( target=main.CLIs[ i ].clusters,
Jon Hall69b2b982016-05-11 12:04:59 -07001651 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001652 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001653 threads.append( t )
1654 t.start()
1655
1656 for t in threads:
1657 t.join()
1658 clusters.append( t.result )
1659 # Compare json objects for hosts and dataplane clusters
1660
1661 # hosts
1662 main.step( "Host view is consistent across ONOS nodes" )
1663 consistentHostsResult = main.TRUE
1664 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001665 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001666 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1667 if hosts[ controller ] == hosts[ 0 ]:
1668 continue
1669 else: # hosts not consistent
1670 main.log.error( "hosts from ONOS" +
1671 controllerStr +
1672 " is inconsistent with ONOS1" )
1673 main.log.warn( repr( hosts[ controller ] ) )
1674 consistentHostsResult = main.FALSE
1675
1676 else:
1677 main.log.error( "Error in getting ONOS hosts from ONOS" +
1678 controllerStr )
1679 consistentHostsResult = main.FALSE
1680 main.log.warn( "ONOS" + controllerStr +
1681 " hosts response: " +
1682 repr( hosts[ controller ] ) )
1683 utilities.assert_equals(
1684 expect=main.TRUE,
1685 actual=consistentHostsResult,
1686 onpass="Hosts view is consistent across all ONOS nodes",
1687 onfail="ONOS nodes have different views of hosts" )
1688
1689 main.step( "Each host has an IP address" )
1690 ipResult = main.TRUE
1691 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001692 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001693 if hosts[ controller ]:
1694 for host in hosts[ controller ]:
Jon Hallf37d44d2017-05-24 10:37:30 -07001695 if not host.get( 'ipAddresses', [] ):
Jon Hall69b2b982016-05-11 12:04:59 -07001696 main.log.error( "Error with host ips on controller" +
1697 controllerStr + ": " + str( host ) )
1698 ipResult = main.FALSE
1699 utilities.assert_equals(
1700 expect=main.TRUE,
1701 actual=ipResult,
1702 onpass="The ips of the hosts aren't empty",
1703 onfail="The ip of at least one host is missing" )
1704
1705 # Strongly connected clusters of devices
1706 main.step( "Cluster view is consistent across ONOS nodes" )
1707 consistentClustersResult = main.TRUE
1708 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001709 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001710 if "Error" not in clusters[ controller ]:
1711 if clusters[ controller ] == clusters[ 0 ]:
1712 continue
1713 else: # clusters not consistent
1714 main.log.error( "clusters from ONOS" + controllerStr +
1715 " is inconsistent with ONOS1" )
1716 consistentClustersResult = main.FALSE
1717
1718 else:
1719 main.log.error( "Error in getting dataplane clusters " +
1720 "from ONOS" + controllerStr )
1721 consistentClustersResult = main.FALSE
1722 main.log.warn( "ONOS" + controllerStr +
1723 " clusters response: " +
1724 repr( clusters[ controller ] ) )
1725 utilities.assert_equals(
1726 expect=main.TRUE,
1727 actual=consistentClustersResult,
1728 onpass="Clusters view is consistent across all ONOS nodes",
1729 onfail="ONOS nodes have different views of clusters" )
1730 if not consistentClustersResult:
1731 main.log.debug( clusters )
1732
1733 # there should always only be one cluster
1734 main.step( "Cluster view correct across ONOS nodes" )
1735 try:
1736 numClusters = len( json.loads( clusters[ 0 ] ) )
1737 except ( ValueError, TypeError ):
1738 main.log.exception( "Error parsing clusters[0]: " +
1739 repr( clusters[ 0 ] ) )
1740 numClusters = "ERROR"
1741 utilities.assert_equals(
1742 expect=1,
1743 actual=numClusters,
1744 onpass="ONOS shows 1 SCC",
1745 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1746
1747 main.step( "Comparing ONOS topology to MN" )
1748 devicesResults = main.TRUE
1749 linksResults = main.TRUE
1750 hostsResults = main.TRUE
1751 mnSwitches = main.Mininet1.getSwitches()
1752 mnLinks = main.Mininet1.getLinks()
1753 mnHosts = main.Mininet1.getHosts()
1754 for controller in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001755 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001756 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07001757 "Error" not in devices[ controller ] and\
1758 "Error" not in ports[ controller ]:
1759 currentDevicesResult = main.Mininet1.compareSwitches(
1760 mnSwitches,
1761 json.loads( devices[ controller ] ),
1762 json.loads( ports[ controller ] ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001763 else:
1764 currentDevicesResult = main.FALSE
1765 utilities.assert_equals( expect=main.TRUE,
1766 actual=currentDevicesResult,
1767 onpass="ONOS" + controllerStr +
1768 " Switches view is correct",
1769 onfail="ONOS" + controllerStr +
1770 " Switches view is incorrect" )
1771 if links[ controller ] and "Error" not in links[ controller ]:
1772 currentLinksResult = main.Mininet1.compareLinks(
1773 mnSwitches, mnLinks,
1774 json.loads( links[ controller ] ) )
1775 else:
1776 currentLinksResult = main.FALSE
1777 utilities.assert_equals( expect=main.TRUE,
1778 actual=currentLinksResult,
1779 onpass="ONOS" + controllerStr +
1780 " links view is correct",
1781 onfail="ONOS" + controllerStr +
1782 " links view is incorrect" )
1783
1784 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1785 currentHostsResult = main.Mininet1.compareHosts(
1786 mnHosts,
1787 hosts[ controller ] )
1788 else:
1789 currentHostsResult = main.FALSE
1790 utilities.assert_equals( expect=main.TRUE,
1791 actual=currentHostsResult,
1792 onpass="ONOS" + controllerStr +
1793 " hosts exist in Mininet",
1794 onfail="ONOS" + controllerStr +
1795 " hosts don't match Mininet" )
1796
1797 devicesResults = devicesResults and currentDevicesResult
1798 linksResults = linksResults and currentLinksResult
1799 hostsResults = hostsResults and currentHostsResult
1800
1801 main.step( "Device information is correct" )
1802 utilities.assert_equals(
1803 expect=main.TRUE,
1804 actual=devicesResults,
1805 onpass="Device information is correct",
1806 onfail="Device information is incorrect" )
1807
1808 main.step( "Links are correct" )
1809 utilities.assert_equals(
1810 expect=main.TRUE,
1811 actual=linksResults,
1812 onpass="Link are correct",
1813 onfail="Links are incorrect" )
1814
1815 main.step( "Hosts are correct" )
1816 utilities.assert_equals(
1817 expect=main.TRUE,
1818 actual=hostsResults,
1819 onpass="Hosts are correct",
1820 onfail="Hosts are incorrect" )
1821
1822 def CASE6( self, main ):
1823 """
1824 The Scaling case.
1825 """
1826 import time
1827 import re
1828 assert main.numCtrls, "main.numCtrls not defined"
1829 assert main, "main not defined"
1830 assert utilities.assert_equals, "utilities.assert_equals not defined"
1831 assert main.CLIs, "main.CLIs not defined"
1832 assert main.nodes, "main.nodes not defined"
1833 try:
1834 labels
1835 except NameError:
1836 main.log.error( "labels not defined, setting to []" )
1837 global labels
1838 labels = []
1839 try:
1840 data
1841 except NameError:
1842 main.log.error( "data not defined, setting to []" )
1843 global data
1844 data = []
1845
1846 main.case( "Swap some of the ONOS nodes" )
1847
1848 main.step( "Checking ONOS Logs for errors" )
1849 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001850 node = main.nodes[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -07001851 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1852 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1853
1854 main.step( "Generate new metadata file" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001855 old = [ main.activeNodes[ 1 ], main.activeNodes[ -2 ] ]
1856 new = range( main.ONOSbench.maxNodes )[ -2: ]
Jon Hall69b2b982016-05-11 12:04:59 -07001857 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1858 handle = main.ONOSbench.handle
1859 for x, y in zip( old, new ):
1860 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1861 handle.expect( "\$" ) # from the variable
1862 ret = handle.before
1863 handle.expect( "\$" ) # From the prompt
1864 ret += handle.before
1865 main.log.debug( ret )
1866 main.activeNodes.remove( x )
1867 main.activeNodes.append( y )
1868
1869 genResult = main.Server.generateFile( main.numCtrls )
1870 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1871 onpass="New cluster metadata file generated",
1872 onfail="Failled to generate new metadata file" )
1873 time.sleep( 5 ) # Give time for nodes to read new file
1874
1875 main.step( "Start new nodes" ) # OR stop old nodes?
1876 started = main.TRUE
1877 for i in new:
Jon Hallf37d44d2017-05-24 10:37:30 -07001878 started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
Jon Hall69b2b982016-05-11 12:04:59 -07001879 utilities.assert_equals( expect=main.TRUE, actual=started,
1880 onpass="ONOS started",
1881 onfail="ONOS start NOT successful" )
1882
1883 main.step( "Checking if ONOS is up yet" )
1884 for i in range( 2 ):
1885 onosIsupResult = main.TRUE
1886 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001887 node = main.nodes[ i ]
Jon Hall168c1862017-01-31 17:35:34 -08001888 main.ONOSbench.onosSecureSSH( node=node.ip_address )
Jon Hall69b2b982016-05-11 12:04:59 -07001889 started = main.ONOSbench.isup( node.ip_address )
1890 if not started:
1891 main.log.error( node.name + " didn't start!" )
1892 onosIsupResult = onosIsupResult and started
1893 if onosIsupResult == main.TRUE:
1894 break
1895 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1896 onpass="ONOS started",
1897 onfail="ONOS start NOT successful" )
1898
Jon Hall6509dbf2016-06-21 17:01:17 -07001899 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -07001900 cliResults = main.TRUE
1901 threads = []
1902 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001903 t = main.Thread( target=main.CLIs[ i ].startOnosCli,
Jon Hall69b2b982016-05-11 12:04:59 -07001904 name="startOnosCli-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001905 args=[ main.nodes[ i ].ip_address ] )
Jon Hall69b2b982016-05-11 12:04:59 -07001906 threads.append( t )
1907 t.start()
1908
1909 for t in threads:
1910 t.join()
1911 cliResults = cliResults and t.result
1912 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1913 onpass="ONOS cli started",
1914 onfail="ONOS clis did not start" )
1915
1916 main.step( "Checking ONOS nodes" )
1917 nodeResults = utilities.retry( main.HA.nodesCheck,
1918 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07001919 args=[ main.activeNodes ],
Jon Hall69b2b982016-05-11 12:04:59 -07001920 attempts=5 )
1921 utilities.assert_equals( expect=True, actual=nodeResults,
1922 onpass="Nodes check successful",
1923 onfail="Nodes check NOT successful" )
1924
1925 for i in range( 10 ):
1926 ready = True
1927 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001928 cli = main.CLIs[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -07001929 output = cli.summary()
1930 if not output:
1931 ready = False
1932 if ready:
1933 break
1934 time.sleep( 30 )
1935 utilities.assert_equals( expect=True, actual=ready,
1936 onpass="ONOS summary command succeded",
1937 onfail="ONOS summary command failed" )
1938 if not ready:
1939 main.cleanup()
1940 main.exit()
1941
1942 # Rerun for election on new nodes
1943 runResults = main.TRUE
1944 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001945 cli = main.CLIs[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -07001946 run = cli.electionTestRun()
1947 if run != main.TRUE:
1948 main.log.error( "Error running for election on " + cli.name )
1949 runResults = runResults and run
1950 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1951 onpass="Reran for election",
1952 onfail="Failed to rerun for election" )
1953
1954 for node in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001955 main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
1956 main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
1959 main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001960
1961 main.step( "Reapplying cell variable to environment" )
1962 cellName = main.params[ 'ENV' ][ 'cellName' ]
1963 cellResult = main.ONOSbench.setCell( cellName )
1964 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1965 onpass="Set cell successfull",
1966 onfail="Failled to set cell" )
1967
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that the cluster is still self-consistent after the node
        swap performed in CASE6: every switch has a master, device roles
        and intents agree across all active nodes, intents and OpenFlow
        tables match the state captured before scaling, and the leadership
        election app still reports a single consistent leader.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # One thread per active node; each queries that node's CLI.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the mastership (roles) output of every active node.
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report byte-identical roles output.
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, pretty-print each node's roles for comparison.
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        # Collect JSON intent dumps from every active node in parallel.
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare the sorted raw strings; identical dumps mean agreement.
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states for debugging.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # NOTE(review): 'intentState' is presumably a global saved by an
        # earlier case (CASE5); it is not defined in this method — the
        # NameError guard below handles the case where it was never set.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not string-equal: fall back to a
                # structural comparison of the parsed intent objects.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # NOTE(review): 'flows' appears to be a global list of pre-scaling
        # flow-table snapshots captured by an earlier case — TODO confirm.
        # The 28 switches match the obelisk topology used by this test.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        """
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        """
        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node must report the same, non-empty leader for the
        # election test app.
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[ i ]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2275
2276 def CASE8( self, main ):
2277 """
2278 Compare topo
2279 """
2280 import json
2281 import time
2282 assert main.numCtrls, "main.numCtrls not defined"
2283 assert main, "main not defined"
2284 assert utilities.assert_equals, "utilities.assert_equals not defined"
2285 assert main.CLIs, "main.CLIs not defined"
2286 assert main.nodes, "main.nodes not defined"
2287
2288 main.case( "Compare ONOS Topology view to Mininet topology" )
2289 main.caseExplanation = "Compare topology objects between Mininet" +\
2290 " and ONOS"
2291 topoResult = main.FALSE
2292 topoFailMsg = "ONOS topology don't match Mininet"
2293 elapsed = 0
2294 count = 0
2295 main.step( "Comparing ONOS topology to MN topology" )
2296 startTime = time.time()
2297 # Give time for Gossip to work
2298 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2299 devicesResults = main.TRUE
2300 linksResults = main.TRUE
2301 hostsResults = main.TRUE
2302 hostAttachmentResults = True
2303 count += 1
2304 cliStart = time.time()
2305 devices = []
2306 threads = []
2307 for i in main.activeNodes:
2308 t = main.Thread( target=utilities.retry,
2309 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002310 args=[ main.CLIs[ i ].devices, [ None ] ],
2311 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002312 'randomTime': True } )
2313 threads.append( t )
2314 t.start()
2315
2316 for t in threads:
2317 t.join()
2318 devices.append( t.result )
2319 hosts = []
2320 ipResult = main.TRUE
2321 threads = []
2322 for i in main.activeNodes:
2323 t = main.Thread( target=utilities.retry,
2324 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002325 args=[ main.CLIs[ i ].hosts, [ None ] ],
2326 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002327 'randomTime': True } )
2328 threads.append( t )
2329 t.start()
2330
2331 for t in threads:
2332 t.join()
2333 try:
2334 hosts.append( json.loads( t.result ) )
2335 except ( ValueError, TypeError ):
2336 main.log.exception( "Error parsing hosts results" )
2337 main.log.error( repr( t.result ) )
2338 hosts.append( None )
2339 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002340 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002341 if hosts[ controller ]:
2342 for host in hosts[ controller ]:
2343 if host is None or host.get( 'ipAddresses', [] ) == []:
2344 main.log.error(
2345 "Error with host ipAddresses on controller" +
2346 controllerStr + ": " + str( host ) )
2347 ipResult = main.FALSE
2348 ports = []
2349 threads = []
2350 for i in main.activeNodes:
2351 t = main.Thread( target=utilities.retry,
2352 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002353 args=[ main.CLIs[ i ].ports, [ None ] ],
2354 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002355 'randomTime': True } )
2356 threads.append( t )
2357 t.start()
2358
2359 for t in threads:
2360 t.join()
2361 ports.append( t.result )
2362 links = []
2363 threads = []
2364 for i in main.activeNodes:
2365 t = main.Thread( target=utilities.retry,
2366 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002367 args=[ main.CLIs[ i ].links, [ None ] ],
2368 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002369 'randomTime': True } )
2370 threads.append( t )
2371 t.start()
2372
2373 for t in threads:
2374 t.join()
2375 links.append( t.result )
2376 clusters = []
2377 threads = []
2378 for i in main.activeNodes:
2379 t = main.Thread( target=utilities.retry,
2380 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002381 args=[ main.CLIs[ i ].clusters, [ None ] ],
2382 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002383 'randomTime': True } )
2384 threads.append( t )
2385 t.start()
2386
2387 for t in threads:
2388 t.join()
2389 clusters.append( t.result )
2390
2391 elapsed = time.time() - startTime
2392 cliTime = time.time() - cliStart
2393 print "Elapsed time: " + str( elapsed )
2394 print "CLI time: " + str( cliTime )
2395
2396 if all( e is None for e in devices ) and\
2397 all( e is None for e in hosts ) and\
2398 all( e is None for e in ports ) and\
2399 all( e is None for e in links ) and\
2400 all( e is None for e in clusters ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002401 topoFailMsg = "Could not get topology from ONOS"
2402 main.log.error( topoFailMsg )
2403 continue # Try again, No use trying to compare
Jon Hall69b2b982016-05-11 12:04:59 -07002404
2405 mnSwitches = main.Mininet1.getSwitches()
2406 mnLinks = main.Mininet1.getLinks()
2407 mnHosts = main.Mininet1.getHosts()
2408 for controller in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002409 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002410 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07002411 "Error" not in devices[ controller ] and\
2412 "Error" not in ports[ controller ]:
Jon Hall69b2b982016-05-11 12:04:59 -07002413
2414 try:
2415 currentDevicesResult = main.Mininet1.compareSwitches(
2416 mnSwitches,
2417 json.loads( devices[ controller ] ),
2418 json.loads( ports[ controller ] ) )
2419 except ( TypeError, ValueError ):
2420 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2421 devices[ controller ], ports[ controller ] ) )
2422 else:
2423 currentDevicesResult = main.FALSE
2424 utilities.assert_equals( expect=main.TRUE,
2425 actual=currentDevicesResult,
2426 onpass="ONOS" + controllerStr +
2427 " Switches view is correct",
2428 onfail="ONOS" + controllerStr +
2429 " Switches view is incorrect" )
2430
2431 if links[ controller ] and "Error" not in links[ controller ]:
2432 currentLinksResult = main.Mininet1.compareLinks(
2433 mnSwitches, mnLinks,
2434 json.loads( links[ controller ] ) )
2435 else:
2436 currentLinksResult = main.FALSE
2437 utilities.assert_equals( expect=main.TRUE,
2438 actual=currentLinksResult,
2439 onpass="ONOS" + controllerStr +
2440 " links view is correct",
2441 onfail="ONOS" + controllerStr +
2442 " links view is incorrect" )
2443 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2444 currentHostsResult = main.Mininet1.compareHosts(
2445 mnHosts,
2446 hosts[ controller ] )
2447 elif hosts[ controller ] == []:
2448 currentHostsResult = main.TRUE
2449 else:
2450 currentHostsResult = main.FALSE
2451 utilities.assert_equals( expect=main.TRUE,
2452 actual=currentHostsResult,
2453 onpass="ONOS" + controllerStr +
2454 " hosts exist in Mininet",
2455 onfail="ONOS" + controllerStr +
2456 " hosts don't match Mininet" )
2457 # CHECKING HOST ATTACHMENT POINTS
2458 hostAttachment = True
2459 zeroHosts = False
2460 # FIXME: topo-HA/obelisk specific mappings:
2461 # key is mac and value is dpid
2462 mappings = {}
2463 for i in range( 1, 29 ): # hosts 1 through 28
2464 # set up correct variables:
Jon Hallf37d44d2017-05-24 10:37:30 -07002465 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
Jon Hall69b2b982016-05-11 12:04:59 -07002466 if i == 1:
Jon Hallf37d44d2017-05-24 10:37:30 -07002467 deviceId = "1000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002468 elif i == 2:
Jon Hallf37d44d2017-05-24 10:37:30 -07002469 deviceId = "2000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002470 elif i == 3:
Jon Hallf37d44d2017-05-24 10:37:30 -07002471 deviceId = "3000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002472 elif i == 4:
Jon Hallf37d44d2017-05-24 10:37:30 -07002473 deviceId = "3004".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002474 elif i == 5:
Jon Hallf37d44d2017-05-24 10:37:30 -07002475 deviceId = "5000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002476 elif i == 6:
Jon Hallf37d44d2017-05-24 10:37:30 -07002477 deviceId = "6000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002478 elif i == 7:
Jon Hallf37d44d2017-05-24 10:37:30 -07002479 deviceId = "6007".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002480 elif i >= 8 and i <= 17:
2481 dpid = '3' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002482 deviceId = dpid.zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002483 elif i >= 18 and i <= 27:
2484 dpid = '6' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002485 deviceId = dpid.zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002486 elif i == 28:
Jon Hallf37d44d2017-05-24 10:37:30 -07002487 deviceId = "2800".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002488 mappings[ macId ] = deviceId
2489 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2490 if hosts[ controller ] == []:
2491 main.log.warn( "There are no hosts discovered" )
2492 zeroHosts = True
2493 else:
2494 for host in hosts[ controller ]:
2495 mac = None
2496 location = None
2497 device = None
2498 port = None
2499 try:
2500 mac = host.get( 'mac' )
2501 assert mac, "mac field could not be found for this host object"
2502
2503 location = host.get( 'location' )
2504 assert location, "location field could not be found for this host object"
2505
2506 # Trim the protocol identifier off deviceId
Jon Hallf37d44d2017-05-24 10:37:30 -07002507 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
Jon Hall69b2b982016-05-11 12:04:59 -07002508 assert device, "elementId field could not be found for this host location object"
2509
2510 port = location.get( 'port' )
2511 assert port, "port field could not be found for this host location object"
2512
2513 # Now check if this matches where they should be
2514 if mac and device and port:
2515 if str( port ) != "1":
2516 main.log.error( "The attachment port is incorrect for " +
2517 "host " + str( mac ) +
Jon Hallf37d44d2017-05-24 10:37:30 -07002518 ". Expected: 1 Actual: " + str( port ) )
Jon Hall69b2b982016-05-11 12:04:59 -07002519 hostAttachment = False
2520 if device != mappings[ str( mac ) ]:
2521 main.log.error( "The attachment device is incorrect for " +
2522 "host " + str( mac ) +
2523 ". Expected: " + mappings[ str( mac ) ] +
2524 " Actual: " + device )
2525 hostAttachment = False
2526 else:
2527 hostAttachment = False
2528 except AssertionError:
2529 main.log.exception( "Json object not as expected" )
2530 main.log.error( repr( host ) )
2531 hostAttachment = False
2532 else:
2533 main.log.error( "No hosts json output or \"Error\"" +
2534 " in output. hosts = " +
2535 repr( hosts[ controller ] ) )
2536 if zeroHosts is False:
2537 # TODO: Find a way to know if there should be hosts in a
2538 # given point of the test
2539 hostAttachment = True
2540
2541 # END CHECKING HOST ATTACHMENT POINTS
2542 devicesResults = devicesResults and currentDevicesResult
2543 linksResults = linksResults and currentLinksResult
2544 hostsResults = hostsResults and currentHostsResult
2545 hostAttachmentResults = hostAttachmentResults and\
2546 hostAttachment
2547 topoResult = ( devicesResults and linksResults
2548 and hostsResults and ipResult and
2549 hostAttachmentResults )
2550 utilities.assert_equals( expect=True,
2551 actual=topoResult,
2552 onpass="ONOS topology matches Mininet",
2553 onfail=topoFailMsg )
2554 # End of While loop to pull ONOS state
2555
2556 # Compare json objects for hosts and dataplane clusters
2557
2558 # hosts
2559 main.step( "Hosts view is consistent across all ONOS nodes" )
2560 consistentHostsResult = main.TRUE
2561 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002562 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002563 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2564 if hosts[ controller ] == hosts[ 0 ]:
2565 continue
2566 else: # hosts not consistent
2567 main.log.error( "hosts from ONOS" + controllerStr +
2568 " is inconsistent with ONOS1" )
2569 main.log.warn( repr( hosts[ controller ] ) )
2570 consistentHostsResult = main.FALSE
2571
2572 else:
2573 main.log.error( "Error in getting ONOS hosts from ONOS" +
2574 controllerStr )
2575 consistentHostsResult = main.FALSE
2576 main.log.warn( "ONOS" + controllerStr +
2577 " hosts response: " +
2578 repr( hosts[ controller ] ) )
2579 utilities.assert_equals(
2580 expect=main.TRUE,
2581 actual=consistentHostsResult,
2582 onpass="Hosts view is consistent across all ONOS nodes",
2583 onfail="ONOS nodes have different views of hosts" )
2584
2585 main.step( "Hosts information is correct" )
2586 hostsResults = hostsResults and ipResult
2587 utilities.assert_equals(
2588 expect=main.TRUE,
2589 actual=hostsResults,
2590 onpass="Host information is correct",
2591 onfail="Host information is incorrect" )
2592
2593 main.step( "Host attachment points to the network" )
2594 utilities.assert_equals(
2595 expect=True,
2596 actual=hostAttachmentResults,
2597 onpass="Hosts are correctly attached to the network",
2598 onfail="ONOS did not correctly attach hosts to the network" )
2599
2600 # Strongly connected clusters of devices
2601 main.step( "Clusters view is consistent across all ONOS nodes" )
2602 consistentClustersResult = main.TRUE
2603 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002604 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002605 if "Error" not in clusters[ controller ]:
2606 if clusters[ controller ] == clusters[ 0 ]:
2607 continue
2608 else: # clusters not consistent
2609 main.log.error( "clusters from ONOS" +
2610 controllerStr +
2611 " is inconsistent with ONOS1" )
2612 consistentClustersResult = main.FALSE
2613 else:
2614 main.log.error( "Error in getting dataplane clusters " +
2615 "from ONOS" + controllerStr )
2616 consistentClustersResult = main.FALSE
2617 main.log.warn( "ONOS" + controllerStr +
2618 " clusters response: " +
2619 repr( clusters[ controller ] ) )
2620 utilities.assert_equals(
2621 expect=main.TRUE,
2622 actual=consistentClustersResult,
2623 onpass="Clusters view is consistent across all ONOS nodes",
2624 onfail="ONOS nodes have different views of clusters" )
2625 if not consistentClustersResult:
2626 main.log.debug( clusters )
2627 for x in links:
2628 main.log.warn( "{}: {}".format( len( x ), x ) )
2629
Jon Hall69b2b982016-05-11 12:04:59 -07002630 main.step( "There is only one SCC" )
2631 # there should always only be one cluster
2632 try:
2633 numClusters = len( json.loads( clusters[ 0 ] ) )
2634 except ( ValueError, TypeError ):
2635 main.log.exception( "Error parsing clusters[0]: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07002636 repr( clusters[ 0 ] ) )
Jon Hall69b2b982016-05-11 12:04:59 -07002637 numClusters = "ERROR"
2638 clusterResults = main.FALSE
2639 if numClusters == 1:
2640 clusterResults = main.TRUE
2641 utilities.assert_equals(
2642 expect=1,
2643 actual=numClusters,
2644 onpass="ONOS shows 1 SCC",
2645 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2646
2647 topoResult = ( devicesResults and linksResults
2648 and hostsResults and consistentHostsResult
2649 and consistentClustersResult and clusterResults
2650 and ipResult and hostAttachmentResults )
2651
2652 topoResult = topoResult and int( count <= 2 )
2653 note = "note it takes about " + str( int( cliTime ) ) + \
2654 " seconds for the test to make all the cli calls to fetch " +\
2655 "the topology from each ONOS instance"
2656 main.log.info(
2657 "Very crass estimate for topology discovery/convergence( " +
2658 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2659 str( count ) + " tries" )
2660
2661 main.step( "Device information is correct" )
2662 utilities.assert_equals(
2663 expect=main.TRUE,
2664 actual=devicesResults,
2665 onpass="Device information is correct",
2666 onfail="Device information is incorrect" )
2667
2668 main.step( "Links are correct" )
2669 utilities.assert_equals(
2670 expect=main.TRUE,
2671 actual=linksResults,
2672 onpass="Link are correct",
2673 onfail="Links are incorrect" )
2674
2675 main.step( "Hosts are correct" )
2676 utilities.assert_equals(
2677 expect=main.TRUE,
2678 actual=hostsResults,
2679 onpass="Hosts are correct",
2680 onfail="Hosts are incorrect" )
2681
2682 # FIXME: move this to an ONOS state case
2683 main.step( "Checking ONOS nodes" )
2684 nodeResults = utilities.retry( main.HA.nodesCheck,
2685 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07002686 args=[ main.activeNodes ],
Jon Hall69b2b982016-05-11 12:04:59 -07002687 attempts=5 )
2688 utilities.assert_equals( expect=True, actual=nodeResults,
2689 onpass="Nodes check successful",
2690 onfail="Nodes check NOT successful" )
2691 if not nodeResults:
2692 for i in main.activeNodes:
2693 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallf37d44d2017-05-24 10:37:30 -07002694 main.CLIs[ i ].name,
2695 main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall69b2b982016-05-11 12:04:59 -07002696
Jon Halld2871c22016-07-26 11:01:14 -07002697 if not topoResult:
2698 main.cleanup()
2699 main.exit()
2700
Jon Hall69b2b982016-05-11 12:04:59 -07002701 def CASE9( self, main ):
2702 """
2703 Link s3-s28 down
2704 """
2705 import time
2706 assert main.numCtrls, "main.numCtrls not defined"
2707 assert main, "main not defined"
2708 assert utilities.assert_equals, "utilities.assert_equals not defined"
2709 assert main.CLIs, "main.CLIs not defined"
2710 assert main.nodes, "main.nodes not defined"
2711 # NOTE: You should probably run a topology check after this
2712
2713 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2714
2715 description = "Turn off a link to ensure that Link Discovery " +\
2716 "is working properly"
2717 main.case( description )
2718
2719 main.step( "Kill Link between s3 and s28" )
2720 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2721 main.log.info( "Waiting " + str( linkSleep ) +
2722 " seconds for link down to be discovered" )
2723 time.sleep( linkSleep )
2724 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2725 onpass="Link down successful",
2726 onfail="Failed to bring link down" )
2727 # TODO do some sort of check here
2728
2729 def CASE10( self, main ):
2730 """
2731 Link s3-s28 up
2732 """
2733 import time
2734 assert main.numCtrls, "main.numCtrls not defined"
2735 assert main, "main not defined"
2736 assert utilities.assert_equals, "utilities.assert_equals not defined"
2737 assert main.CLIs, "main.CLIs not defined"
2738 assert main.nodes, "main.nodes not defined"
2739 # NOTE: You should probably run a topology check after this
2740
2741 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2742
2743 description = "Restore a link to ensure that Link Discovery is " + \
2744 "working properly"
2745 main.case( description )
2746
2747 main.step( "Bring link between s3 and s28 back up" )
2748 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2749 main.log.info( "Waiting " + str( linkSleep ) +
2750 " seconds for link up to be discovered" )
2751 time.sleep( linkSleep )
2752 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2753 onpass="Link up successful",
2754 onfail="Failed to bring link up" )
2755 # TODO do some sort of check here
2756
2757 def CASE11( self, main ):
2758 """
2759 Switch Down
2760 """
2761 # NOTE: You should probably run a topology check after this
2762 import time
2763 assert main.numCtrls, "main.numCtrls not defined"
2764 assert main, "main not defined"
2765 assert utilities.assert_equals, "utilities.assert_equals not defined"
2766 assert main.CLIs, "main.CLIs not defined"
2767 assert main.nodes, "main.nodes not defined"
2768
2769 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2770
2771 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallf37d44d2017-05-24 10:37:30 -07002772 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -07002773 main.case( description )
2774 switch = main.params[ 'kill' ][ 'switch' ]
2775 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2776
2777 # TODO: Make this switch parameterizable
2778 main.step( "Kill " + switch )
2779 main.log.info( "Deleting " + switch )
2780 main.Mininet1.delSwitch( switch )
2781 main.log.info( "Waiting " + str( switchSleep ) +
2782 " seconds for switch down to be discovered" )
2783 time.sleep( switchSleep )
2784 device = onosCli.getDevice( dpid=switchDPID )
2785 # Peek at the deleted switch
2786 main.log.warn( str( device ) )
2787 result = main.FALSE
2788 if device and device[ 'available' ] is False:
2789 result = main.TRUE
2790 utilities.assert_equals( expect=main.TRUE, actual=result,
2791 onpass="Kill switch successful",
2792 onfail="Failed to kill switch?" )
2793
2794 def CASE12( self, main ):
2795 """
2796 Switch Up
2797 """
2798 # NOTE: You should probably run a topology check after this
2799 import time
2800 assert main.numCtrls, "main.numCtrls not defined"
2801 assert main, "main not defined"
2802 assert utilities.assert_equals, "utilities.assert_equals not defined"
2803 assert main.CLIs, "main.CLIs not defined"
2804 assert main.nodes, "main.nodes not defined"
2805
2806 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2807 switch = main.params[ 'kill' ][ 'switch' ]
2808 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2809 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallf37d44d2017-05-24 10:37:30 -07002810 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -07002811 description = "Adding a switch to ensure it is discovered correctly"
2812 main.case( description )
2813
2814 main.step( "Add back " + switch )
2815 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2816 for peer in links:
2817 main.Mininet1.addLink( switch, peer )
2818 ipList = [ node.ip_address for node in main.nodes ]
2819 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2820 main.log.info( "Waiting " + str( switchSleep ) +
2821 " seconds for switch up to be discovered" )
2822 time.sleep( switchSleep )
2823 device = onosCli.getDevice( dpid=switchDPID )
2824 # Peek at the deleted switch
2825 main.log.warn( str( device ) )
2826 result = main.FALSE
2827 if device and device[ 'available' ]:
2828 result = main.TRUE
2829 utilities.assert_equals( expect=main.TRUE, actual=result,
2830 onpass="add switch successful",
2831 onfail="Failed to add switch?" )
2832
2833 def CASE13( self, main ):
2834 """
2835 Clean up
2836 """
2837 assert main.numCtrls, "main.numCtrls not defined"
2838 assert main, "main not defined"
2839 assert utilities.assert_equals, "utilities.assert_equals not defined"
2840 assert main.CLIs, "main.CLIs not defined"
2841 assert main.nodes, "main.nodes not defined"
2842
2843 main.case( "Test Cleanup" )
2844 main.step( "Killing tcpdumps" )
2845 main.Mininet2.stopTcpdump()
2846
2847 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2848 main.step( "Copying MN pcap and ONOS log files to test station" )
2849 # NOTE: MN Pcap file is being saved to logdir.
2850 # We scp this file as MN and TestON aren't necessarily the same vm
2851
2852 # FIXME: To be replaced with a Jenkin's post script
2853 # TODO: Load these from params
2854 # NOTE: must end in /
2855 logFolder = "/opt/onos/log/"
2856 logFiles = [ "karaf.log", "karaf.log.1" ]
2857 # NOTE: must end in /
2858 for f in logFiles:
2859 for node in main.nodes:
2860 dstName = main.logdir + "/" + node.name + "-" + f
2861 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2862 logFolder + f, dstName )
2863 # std*.log's
2864 # NOTE: must end in /
2865 logFolder = "/opt/onos/var/"
2866 logFiles = [ "stderr.log", "stdout.log" ]
2867 # NOTE: must end in /
2868 for f in logFiles:
2869 for node in main.nodes:
2870 dstName = main.logdir + "/" + node.name + "-" + f
2871 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2872 logFolder + f, dstName )
2873 else:
2874 main.log.debug( "skipping saving log files" )
2875
2876 main.step( "Stopping Mininet" )
2877 mnResult = main.Mininet1.stopNet()
2878 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2879 onpass="Mininet stopped",
2880 onfail="MN cleanup NOT successful" )
2881
2882 main.step( "Checking ONOS Logs for errors" )
2883 for node in main.nodes:
2884 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2885 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2886
2887 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07002888 timerLog = open( main.logdir + "/Timers.csv", 'w' )
Jon Hall69b2b982016-05-11 12:04:59 -07002889 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2890 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2891 timerLog.close()
Jon Hallf37d44d2017-05-24 10:37:30 -07002892 except NameError as e:
2893 main.log.exception( e )
Jon Hall69b2b982016-05-11 12:04:59 -07002894
2895 main.step( "Stopping webserver" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002896 status = main.Server.stop()
Jon Hall69b2b982016-05-11 12:04:59 -07002897 utilities.assert_equals( expect=main.TRUE, actual=status,
2898 onpass="Stop Server",
2899 onfail="Failled to stop SimpleHTTPServer" )
2900 del main.Server
2901
2902 def CASE14( self, main ):
2903 """
2904 start election app on all onos nodes
2905 """
2906 import time
2907 assert main.numCtrls, "main.numCtrls not defined"
2908 assert main, "main not defined"
2909 assert utilities.assert_equals, "utilities.assert_equals not defined"
2910 assert main.CLIs, "main.CLIs not defined"
2911 assert main.nodes, "main.nodes not defined"
2912
Jon Hallf37d44d2017-05-24 10:37:30 -07002913 main.case( "Start Leadership Election app" )
Jon Hall69b2b982016-05-11 12:04:59 -07002914 main.step( "Install leadership election app" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002915 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -07002916 appResult = onosCli.activateApp( "org.onosproject.election" )
2917 utilities.assert_equals(
2918 expect=main.TRUE,
2919 actual=appResult,
2920 onpass="Election app installed",
2921 onfail="Something went wrong with installing Leadership election" )
2922
2923 main.step( "Run for election on each node" )
2924 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002925 main.CLIs[ i ].electionTestRun()
2926 time.sleep( 5 )
2927 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
Jon Hall69b2b982016-05-11 12:04:59 -07002928 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2929 utilities.assert_equals(
2930 expect=True,
2931 actual=sameResult,
2932 onpass="All nodes see the same leaderboards",
2933 onfail="Inconsistent leaderboards" )
2934
2935 if sameResult:
2936 leader = leaders[ 0 ][ 0 ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002937 if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
Jon Hall69b2b982016-05-11 12:04:59 -07002938 correctLeader = True
2939 else:
2940 correctLeader = False
2941 main.step( "First node was elected leader" )
2942 utilities.assert_equals(
2943 expect=True,
2944 actual=correctLeader,
2945 onpass="Correct leader was elected",
2946 onfail="Incorrect leader" )
2947
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        # Preconditions: the test framework must have set these up already
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            # With a single controller, withdrawing the leader leaves nobody
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app there is nothing meaningful to test
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # Leaderboards agree; first entry of any board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader (match by controller IP address)
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE: runs only if no break, i.e. leader not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates on the old board to predict a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3121
3122 def CASE16( self, main ):
3123 """
3124 Install Distributed Primitives app
3125 """
3126 import time
3127 assert main.numCtrls, "main.numCtrls not defined"
3128 assert main, "main not defined"
3129 assert utilities.assert_equals, "utilities.assert_equals not defined"
3130 assert main.CLIs, "main.CLIs not defined"
3131 assert main.nodes, "main.nodes not defined"
3132
3133 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003134 main.pCounterName = "TestON-Partitions"
3135 main.pCounterValue = 0
Jon Hallf37d44d2017-05-24 10:37:30 -07003136 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003137 main.onosSetName = "TestON-set"
Jon Hall69b2b982016-05-11 12:04:59 -07003138
3139 description = "Install Primitives app"
3140 main.case( description )
3141 main.step( "Install Primitives app" )
3142 appName = "org.onosproject.distributedprimitives"
Jon Hallf37d44d2017-05-24 10:37:30 -07003143 node = main.activeNodes[ 0 ]
3144 appResults = main.CLIs[ node ].activateApp( appName )
Jon Hall69b2b982016-05-11 12:04:59 -07003145 utilities.assert_equals( expect=main.TRUE,
3146 actual=appResults,
3147 onpass="Primitives app activated",
3148 onfail="Primitives app not activated" )
3149 time.sleep( 5 ) # To allow all nodes to activate
3150
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives
        """
        # Delegates to the shared HA helper so every HA test variant runs
        # the same distributed-primitives checks.
        main.HA.CASE17( main )