blob: 081128716811afabffc6e3b54be69bd9459e797d [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
             dynamic swapping of cluster nodes.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: Swap nodes
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
class HAswapNodes:
    """HA test: determine if ONOS can handle dynamic swapping of
    cluster nodes (see the module docstring for the full case list)."""

    def __init__( self ):
        # NOTE(review): presumably a placeholder attribute expected by the
        # TestON framework; no other per-instance state is set here — confirm.
        self.default = ''

30 def CASE1( self, main ):
31 """
32 CASE1 is to compile ONOS and push it to the test machines
33
34 Startup sequence:
35 cell <name>
36 onos-verify-cell
37 NOTE: temporary - onos-remove-raft-logs
38 onos-uninstall
39 start mininet
40 git pull
41 mvn clean install
42 onos-package
43 onos-install -f
44 onos-wait-for-start
45 start cli sessions
46 start tcpdump
47 """
48 import time
49 import os
50 import re
51 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
52 "initialization" )
53 main.case( "Setting up test environment" )
54 main.caseExplanation = "Setup the test environment including " +\
55 "installing ONOS, starting Mininet and ONOS" +\
56 "cli sessions."
57
58 # load some variables from the params file
59 PULLCODE = False
60 if main.params[ 'Git' ] == 'True':
61 PULLCODE = True
62 gitBranch = main.params[ 'branch' ]
63 cellName = main.params[ 'ENV' ][ 'cellName' ]
64
65 main.numCtrls = int( main.params[ 'num_controllers' ] )
66 if main.ONOSbench.maxNodes:
67 if main.ONOSbench.maxNodes < main.numCtrls:
68 main.numCtrls = int( main.ONOSbench.maxNodes )
69 # set global variables
70 # These are for csv plotting in jenkins
71 global labels
72 global data
73 labels = []
74 data = []
75
76 try:
77 from tests.HA.dependencies.HA import HA
78 main.HA = HA()
79 from tests.HA.HAswapNodes.dependencies.Server import Server
80 main.Server = Server()
81 except Exception as e:
82 main.log.exception( e )
83 main.cleanup()
84 main.exit()
85
86 main.CLIs = []
87 main.nodes = []
88 ipList = []
89 for i in range( 1, main.numCtrls + 1 ):
90 try:
91 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
92 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
93 ipList.append( main.nodes[ -1 ].ip_address )
94 except AttributeError:
95 break
96
97 main.step( "Create cell file" )
98 cellAppString = main.params[ 'ENV' ][ 'appString' ]
99 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
100 main.Mininet1.ip_address,
Devin Lim461f0872017-06-05 16:49:33 -0700101 cellAppString, ipList, main.ONOScli1.user_name )
Jon Hall69b2b982016-05-11 12:04:59 -0700102
103 main.step( "Applying cell variable to environment" )
104 cellResult = main.ONOSbench.setCell( cellName )
105 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
106 onpass="Set cell successfull",
107 onfail="Failled to set cell" )
108
109 main.step( "Verify connectivity to cell" )
110 verifyResult = main.ONOSbench.verifyCell()
111 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
112 onpass="Verify cell passed",
113 onfail="Failled to verify cell" )
114
115 # FIXME:this is short term fix
116 main.log.info( "Removing raft logs" )
117 main.ONOSbench.onosRemoveRaftLogs()
118
119 main.log.info( "Uninstalling ONOS" )
120 for node in main.nodes:
121 main.ONOSbench.onosUninstall( node.ip_address )
122
123 # Make sure ONOS is DEAD
124 main.log.info( "Killing any ONOS processes" )
125 killResults = main.TRUE
126 for node in main.nodes:
127 killed = main.ONOSbench.onosKill( node.ip_address )
128 killResults = killResults and killed
129
130 main.step( "Setup server for cluster metadata file" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700131 port = main.params[ 'server' ][ 'port' ]
Jon Hall69b2b982016-05-11 12:04:59 -0700132 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
133 main.log.debug( "Root dir: {}".format( rootDir ) )
134 status = main.Server.start( main.ONOSbench,
135 rootDir,
136 port=port,
137 logDir=main.logdir + "/server.log" )
138 utilities.assert_equals( expect=main.TRUE, actual=status,
139 onpass="Server started",
140 onfail="Failled to start SimpleHTTPServer" )
141
142 main.step( "Generate initial metadata file" )
143 if main.numCtrls >= 5:
144 main.numCtrls -= 2
145 else:
146 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
147 genResult = main.Server.generateFile( main.numCtrls )
148 utilities.assert_equals( expect=main.TRUE, actual=genResult,
149 onpass="New cluster metadata file generated",
150 onfail="Failled to generate new metadata file" )
151
Jon Hall69b2b982016-05-11 12:04:59 -0700152 gitPullResult = main.TRUE
153
154 main.step( "Starting Mininet" )
155 # scp topo file to mininet
156 # TODO: move to params?
157 topoName = "obelisk.py"
158 filePath = main.ONOSbench.home + "/tools/test/topos/"
159 main.ONOSbench.scp( main.Mininet1,
160 filePath + topoName,
161 main.Mininet1.home,
162 direction="to" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700163 mnResult = main.Mininet1.startNet()
Jon Hall69b2b982016-05-11 12:04:59 -0700164 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
165 onpass="Mininet Started",
166 onfail="Error starting Mininet" )
167
168 main.step( "Git checkout and pull " + gitBranch )
169 if PULLCODE:
170 main.ONOSbench.gitCheckout( gitBranch )
171 gitPullResult = main.ONOSbench.gitPull()
172 # values of 1 or 3 are good
173 utilities.assert_lesser( expect=0, actual=gitPullResult,
174 onpass="Git pull successful",
175 onfail="Git pull failed" )
176 main.ONOSbench.getVersion( report=True )
177
Jon Hall69b2b982016-05-11 12:04:59 -0700178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAswapNodes"
184 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700185 index = "2"
Jon Hall69b2b982016-05-11 12:04:59 -0700186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
Jon Hallf37d44d2017-05-24 10:37:30 -0700195 main.log.wiki( graphs )
Jon Hall69b2b982016-05-11 12:04:59 -0700196
197 main.step( "Copying backup config files" )
198 path = "~/onos/tools/package/bin/onos-service"
199 cp = main.ONOSbench.scp( main.ONOSbench,
200 path,
201 path + ".backup",
202 direction="to" )
203
204 utilities.assert_equals( expect=main.TRUE,
205 actual=cp,
206 onpass="Copy backup config file succeeded",
207 onfail="Copy backup config file failed" )
208 # we need to modify the onos-service file to use remote metadata file
209 # url for cluster metadata file
Jon Hallf37d44d2017-05-24 10:37:30 -0700210 iface = main.params[ 'server' ].get( 'interface' )
Jon Hall8f6d4622016-05-23 15:27:18 -0700211 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall69b2b982016-05-11 12:04:59 -0700212 metaFile = "cluster.json"
213 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
214 main.log.warn( javaArgs )
215 main.log.warn( repr( javaArgs ) )
216 handle = main.ONOSbench.handle
217 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
218 main.log.warn( sed )
219 main.log.warn( repr( sed ) )
220 handle.sendline( sed )
Jon Hallbd60ea02016-08-23 10:03:59 -0700221 handle.expect( metaFile )
222 output = handle.before
Jon Hall69b2b982016-05-11 12:04:59 -0700223 handle.expect( "\$" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700224 output += handle.before
225 main.log.debug( repr( output ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700226
227 main.step( "Creating ONOS package" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700228 packageResult = main.ONOSbench.buckBuild()
Jon Hall69b2b982016-05-11 12:04:59 -0700229 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
230 onpass="ONOS package successful",
231 onfail="ONOS package failed" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700232 if not packageResult:
233 main.cleanup()
234 main.exit()
Jon Hall69b2b982016-05-11 12:04:59 -0700235
236 main.step( "Installing ONOS package" )
237 onosInstallResult = main.TRUE
238 for i in range( main.ONOSbench.maxNodes ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700239 node = main.nodes[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -0700240 options = "-f"
241 if i >= main.numCtrls:
242 options = "-nf" # Don't start more than the current scale
243 tmpResult = main.ONOSbench.onosInstall( options=options,
244 node=node.ip_address )
245 onosInstallResult = onosInstallResult and tmpResult
246 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
247 onpass="ONOS install successful",
248 onfail="ONOS install failed" )
249
250 # Cleanup custom onos-service file
251 main.ONOSbench.scp( main.ONOSbench,
252 path + ".backup",
253 path,
254 direction="to" )
255
You Wangf5de25b2017-01-06 15:13:01 -0800256 main.step( "Set up ONOS secure SSH" )
257 secureSshResult = main.TRUE
Jon Hall168c1862017-01-31 17:35:34 -0800258 for i in range( main.numCtrls ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700259 node = main.nodes[ i ]
You Wangf5de25b2017-01-06 15:13:01 -0800260 secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
261 utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
262 onpass="Test step PASS",
263 onfail="Test step FAIL" )
264
Jon Hall69b2b982016-05-11 12:04:59 -0700265 main.step( "Checking if ONOS is up yet" )
266 for i in range( 2 ):
267 onosIsupResult = main.TRUE
268 for i in range( main.numCtrls ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700269 node = main.nodes[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -0700270 started = main.ONOSbench.isup( node.ip_address )
271 if not started:
272 main.log.error( node.name + " hasn't started" )
273 onosIsupResult = onosIsupResult and started
274 if onosIsupResult == main.TRUE:
275 break
276 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
277 onpass="ONOS startup successful",
278 onfail="ONOS startup failed" )
279
Jon Hall6509dbf2016-06-21 17:01:17 -0700280 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -0700281 cliResults = main.TRUE
282 threads = []
283 for i in range( main.numCtrls ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700284 t = main.Thread( target=main.CLIs[ i ].startOnosCli,
Jon Hall69b2b982016-05-11 12:04:59 -0700285 name="startOnosCli-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -0700286 args=[ main.nodes[ i ].ip_address ] )
Jon Hall69b2b982016-05-11 12:04:59 -0700287 threads.append( t )
288 t.start()
289
290 for t in threads:
291 t.join()
292 cliResults = cliResults and t.result
293 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
294 onpass="ONOS cli startup successful",
295 onfail="ONOS cli startup failed" )
296
297 # Create a list of active nodes for use when some nodes are stopped
298 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
299
300 if main.params[ 'tcpdump' ].lower() == "true":
301 main.step( "Start Packet Capture MN" )
302 main.Mininet2.startTcpdump(
303 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
304 + "-MN.pcap",
305 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
306 port=main.params[ 'MNtcpdump' ][ 'port' ] )
307
308 main.step( "Checking ONOS nodes" )
309 nodeResults = utilities.retry( main.HA.nodesCheck,
310 False,
Jon Hallf37d44d2017-05-24 10:37:30 -0700311 args=[ main.activeNodes ],
Jon Hall69b2b982016-05-11 12:04:59 -0700312 attempts=5 )
313 utilities.assert_equals( expect=True, actual=nodeResults,
314 onpass="Nodes check successful",
315 onfail="Nodes check NOT successful" )
316
317 if not nodeResults:
318 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700319 cli = main.CLIs[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -0700320 main.log.debug( "{} components not ACTIVE: \n{}".format(
321 cli.name,
322 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
323 main.log.error( "Failed to start ONOS, stopping test" )
324 main.cleanup()
325 main.exit()
326
327 main.step( "Activate apps defined in the params file" )
328 # get data from the params
329 apps = main.params.get( 'apps' )
330 if apps:
Jon Hallf37d44d2017-05-24 10:37:30 -0700331 apps = apps.split( ',' )
Jon Hall69b2b982016-05-11 12:04:59 -0700332 main.log.warn( apps )
333 activateResult = True
334 for app in apps:
335 main.CLIs[ 0 ].app( app, "Activate" )
336 # TODO: check this worked
337 time.sleep( 10 ) # wait for apps to activate
338 for app in apps:
339 state = main.CLIs[ 0 ].appStatus( app )
340 if state == "ACTIVE":
341 activateResult = activateResult and True
342 else:
343 main.log.error( "{} is in {} state".format( app, state ) )
344 activateResult = False
345 utilities.assert_equals( expect=True,
346 actual=activateResult,
347 onpass="Successfully activated apps",
348 onfail="Failed to activate apps" )
349 else:
350 main.log.warn( "No apps were specified to be loaded after startup" )
351
352 main.step( "Set ONOS configurations" )
353 config = main.params.get( 'ONOS_Configuration' )
354 if config:
355 main.log.debug( config )
356 checkResult = main.TRUE
357 for component in config:
Jon Hallf37d44d2017-05-24 10:37:30 -0700358 for setting in config[ component ]:
359 value = config[ component ][ setting ]
Jon Hall69b2b982016-05-11 12:04:59 -0700360 check = main.CLIs[ 0 ].setCfg( component, setting, value )
361 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
362 checkResult = check and checkResult
363 utilities.assert_equals( expect=main.TRUE,
364 actual=checkResult,
365 onpass="Successfully set config",
366 onfail="Failed to set config" )
367 else:
368 main.log.warn( "No configurations were specified to be changed after startup" )
369
370 main.step( "App Ids check" )
371 appCheck = main.TRUE
372 threads = []
373 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700374 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall69b2b982016-05-11 12:04:59 -0700375 name="appToIDCheck-" + str( i ),
376 args=[] )
377 threads.append( t )
378 t.start()
379
380 for t in threads:
381 t.join()
382 appCheck = appCheck and t.result
383 if appCheck != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700384 node = main.activeNodes[ 0 ]
385 main.log.warn( main.CLIs[ node ].apps() )
386 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall69b2b982016-05-11 12:04:59 -0700387 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
388 onpass="App Ids seem to be correct",
389 onfail="Something is wrong with app Ids" )
390
391 def CASE2( self, main ):
392 """
393 Assign devices to controllers
394 """
395 import re
396 assert main.numCtrls, "main.numCtrls not defined"
397 assert main, "main not defined"
398 assert utilities.assert_equals, "utilities.assert_equals not defined"
399 assert main.CLIs, "main.CLIs not defined"
400 assert main.nodes, "main.nodes not defined"
401
402 main.case( "Assigning devices to controllers" )
403 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
404 "and check that an ONOS node becomes the " +\
405 "master of the device."
406 main.step( "Assign switches to controllers" )
407
408 ipList = []
409 for i in range( main.ONOSbench.maxNodes ):
410 ipList.append( main.nodes[ i ].ip_address )
411 swList = []
412 for i in range( 1, 29 ):
413 swList.append( "s" + str( i ) )
414 main.Mininet1.assignSwController( sw=swList, ip=ipList )
415
416 mastershipCheck = main.TRUE
417 for i in range( 1, 29 ):
418 response = main.Mininet1.getSwController( "s" + str( i ) )
419 try:
420 main.log.info( str( response ) )
421 except Exception:
422 main.log.info( repr( response ) )
423 for node in main.nodes:
424 if re.search( "tcp:" + node.ip_address, response ):
425 mastershipCheck = mastershipCheck and main.TRUE
426 else:
427 main.log.error( "Error, node " + node.ip_address + " is " +
428 "not in the list of controllers s" +
429 str( i ) + " is connecting to." )
430 mastershipCheck = main.FALSE
431 utilities.assert_equals(
432 expect=main.TRUE,
433 actual=mastershipCheck,
434 onpass="Switch mastership assigned correctly",
435 onfail="Switches not assigned correctly to controllers" )
436
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Uses 'device-role' to pin each of the 28 devices to a designated
        ONOS node, then re-reads the roles to confirm the assignment took.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        # All CLI calls go through the first active node
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster thanks to the "% main.numCtrls" wrap-around
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = index of the designated controller,
                # deviceId = ONOS id looked up from the switch dpid fragment
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # Unreachable for the current 1..28 range; kept as a
                    # guard in case the range is ever widened
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read every device's master and compare against the controller
        # we assigned above (ipList/deviceList are parallel lists)
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

552 def CASE3( self, main ):
553 """
554 Assign intents
555 """
556 import time
557 import json
558 assert main.numCtrls, "main.numCtrls not defined"
559 assert main, "main not defined"
560 assert utilities.assert_equals, "utilities.assert_equals not defined"
561 assert main.CLIs, "main.CLIs not defined"
562 assert main.nodes, "main.nodes not defined"
563 try:
564 labels
565 except NameError:
566 main.log.error( "labels not defined, setting to []" )
567 labels = []
568 try:
569 data
570 except NameError:
571 main.log.error( "data not defined, setting to []" )
572 data = []
573 # NOTE: we must reinstall intents until we have a persistant intent
574 # datastore!
575 main.case( "Adding host Intents" )
576 main.caseExplanation = "Discover hosts by using pingall then " +\
577 "assign predetermined host-to-host intents." +\
578 " After installation, check that the intent" +\
579 " is distributed to all nodes and the state" +\
580 " is INSTALLED"
581
582 # install onos-app-fwd
583 main.step( "Install reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700584 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -0700585 installResults = onosCli.activateApp( "org.onosproject.fwd" )
586 utilities.assert_equals( expect=main.TRUE, actual=installResults,
587 onpass="Install fwd successful",
588 onfail="Install fwd failed" )
589
590 main.step( "Check app ids" )
591 appCheck = main.TRUE
592 threads = []
593 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700594 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall69b2b982016-05-11 12:04:59 -0700595 name="appToIDCheck-" + str( i ),
596 args=[] )
597 threads.append( t )
598 t.start()
599
600 for t in threads:
601 t.join()
602 appCheck = appCheck and t.result
603 if appCheck != main.TRUE:
604 main.log.warn( onosCli.apps() )
605 main.log.warn( onosCli.appIDs() )
606 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
607 onpass="App Ids seem to be correct",
608 onfail="Something is wrong with app Ids" )
609
610 main.step( "Discovering Hosts( Via pingall for now )" )
611 # FIXME: Once we have a host discovery mechanism, use that instead
612 # REACTIVE FWD test
613 pingResult = main.FALSE
614 passMsg = "Reactive Pingall test passed"
615 time1 = time.time()
616 pingResult = main.Mininet1.pingall()
617 time2 = time.time()
618 if not pingResult:
Jon Hallf37d44d2017-05-24 10:37:30 -0700619 main.log.warn( "First pingall failed. Trying again..." )
Jon Hall69b2b982016-05-11 12:04:59 -0700620 pingResult = main.Mininet1.pingall()
621 passMsg += " on the second try"
622 utilities.assert_equals(
623 expect=main.TRUE,
624 actual=pingResult,
Jon Hallf37d44d2017-05-24 10:37:30 -0700625 onpass=passMsg,
Jon Hall69b2b982016-05-11 12:04:59 -0700626 onfail="Reactive Pingall failed, " +
627 "one or more ping pairs failed" )
628 main.log.info( "Time for pingall: %2f seconds" %
629 ( time2 - time1 ) )
630 # timeout for fwd flows
631 time.sleep( 11 )
632 # uninstall onos-app-fwd
633 main.step( "Uninstall reactive forwarding app" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700634 node = main.activeNodes[ 0 ]
635 uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
Jon Hall69b2b982016-05-11 12:04:59 -0700636 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
637 onpass="Uninstall fwd successful",
638 onfail="Uninstall fwd failed" )
639
640 main.step( "Check app ids" )
641 threads = []
642 appCheck2 = main.TRUE
643 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700644 t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
Jon Hall69b2b982016-05-11 12:04:59 -0700645 name="appToIDCheck-" + str( i ),
646 args=[] )
647 threads.append( t )
648 t.start()
649
650 for t in threads:
651 t.join()
652 appCheck2 = appCheck2 and t.result
653 if appCheck2 != main.TRUE:
Jon Hallf37d44d2017-05-24 10:37:30 -0700654 node = main.activeNodes[ 0 ]
655 main.log.warn( main.CLIs[ node ].apps() )
656 main.log.warn( main.CLIs[ node ].appIDs() )
Jon Hall69b2b982016-05-11 12:04:59 -0700657 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
658 onpass="App Ids seem to be correct",
659 onfail="Something is wrong with app Ids" )
660
661 main.step( "Add host intents via cli" )
662 intentIds = []
663 # TODO: move the host numbers to params
664 # Maybe look at all the paths we ping?
665 intentAddResult = True
666 hostResult = main.TRUE
667 for i in range( 8, 18 ):
668 main.log.info( "Adding host intent between h" + str( i ) +
669 " and h" + str( i + 10 ) )
670 host1 = "00:00:00:00:00:" + \
671 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
672 host2 = "00:00:00:00:00:" + \
673 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
674 # NOTE: getHost can return None
675 host1Dict = onosCli.getHost( host1 )
676 host2Dict = onosCli.getHost( host2 )
677 host1Id = None
678 host2Id = None
679 if host1Dict and host2Dict:
680 host1Id = host1Dict.get( 'id', None )
681 host2Id = host2Dict.get( 'id', None )
682 if host1Id and host2Id:
683 nodeNum = ( i % len( main.activeNodes ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700684 node = main.activeNodes[ nodeNum ]
685 tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
Jon Hall69b2b982016-05-11 12:04:59 -0700686 if tmpId:
687 main.log.info( "Added intent with id: " + tmpId )
688 intentIds.append( tmpId )
689 else:
690 main.log.error( "addHostIntent returned: " +
691 repr( tmpId ) )
692 else:
693 main.log.error( "Error, getHost() failed for h" + str( i ) +
694 " and/or h" + str( i + 10 ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700695 node = main.activeNodes[ 0 ]
696 hosts = main.CLIs[ node ].hosts()
Jon Hall69b2b982016-05-11 12:04:59 -0700697 main.log.warn( "Hosts output: " )
698 try:
699 main.log.warn( json.dumps( json.loads( hosts ),
700 sort_keys=True,
701 indent=4,
702 separators=( ',', ': ' ) ) )
703 except ( ValueError, TypeError ):
704 main.log.warn( repr( hosts ) )
705 hostResult = main.FALSE
706 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
707 onpass="Found a host id for each host",
708 onfail="Error looking up host ids" )
709
710 intentStart = time.time()
711 onosIds = onosCli.getAllIntentsId()
712 main.log.info( "Submitted intents: " + str( intentIds ) )
713 main.log.info( "Intents in ONOS: " + str( onosIds ) )
714 for intent in intentIds:
715 if intent in onosIds:
716 pass # intent submitted is in onos
717 else:
718 intentAddResult = False
719 if intentAddResult:
720 intentStop = time.time()
721 else:
722 intentStop = None
723 # Print the intent states
724 intents = onosCli.intents()
725 intentStates = []
726 installedCheck = True
727 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
728 count = 0
729 try:
730 for intent in json.loads( intents ):
731 state = intent.get( 'state', None )
732 if "INSTALLED" not in state:
733 installedCheck = False
734 intentId = intent.get( 'id', None )
735 intentStates.append( ( intentId, state ) )
736 except ( ValueError, TypeError ):
737 main.log.exception( "Error parsing intents" )
738 # add submitted intents not in the store
739 tmplist = [ i for i, s in intentStates ]
740 missingIntents = False
741 for i in intentIds:
742 if i not in tmplist:
743 intentStates.append( ( i, " - " ) )
744 missingIntents = True
745 intentStates.sort()
746 for i, s in intentStates:
747 count += 1
748 main.log.info( "%-6s%-15s%-15s" %
749 ( str( count ), str( i ), str( s ) ) )
750 leaders = onosCli.leaders()
751 try:
752 missing = False
753 if leaders:
754 parsedLeaders = json.loads( leaders )
755 main.log.warn( json.dumps( parsedLeaders,
756 sort_keys=True,
757 indent=4,
758 separators=( ',', ': ' ) ) )
759 # check for all intent partitions
760 topics = []
761 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700762 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700763 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700764 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -0700765 for topic in topics:
766 if topic not in ONOStopics:
767 main.log.error( "Error: " + topic +
768 " not in leaders" )
769 missing = True
770 else:
771 main.log.error( "leaders() returned None" )
772 except ( ValueError, TypeError ):
773 main.log.exception( "Error parsing leaders" )
774 main.log.error( repr( leaders ) )
775 # Check all nodes
776 if missing:
777 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700778 response = main.CLIs[ i ].leaders( jsonFormat=False )
779 main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
Jon Hall69b2b982016-05-11 12:04:59 -0700780 str( response ) )
781
782 partitions = onosCli.partitions()
783 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700784 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -0700785 parsedPartitions = json.loads( partitions )
786 main.log.warn( json.dumps( parsedPartitions,
787 sort_keys=True,
788 indent=4,
789 separators=( ',', ': ' ) ) )
790 # TODO check for a leader in all paritions
791 # TODO check for consistency among nodes
792 else:
793 main.log.error( "partitions() returned None" )
794 except ( ValueError, TypeError ):
795 main.log.exception( "Error parsing partitions" )
796 main.log.error( repr( partitions ) )
797 pendingMap = onosCli.pendingMap()
798 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700799 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -0700800 parsedPending = json.loads( pendingMap )
801 main.log.warn( json.dumps( parsedPending,
802 sort_keys=True,
803 indent=4,
804 separators=( ',', ': ' ) ) )
805 # TODO check something here?
806 else:
807 main.log.error( "pendingMap() returned None" )
808 except ( ValueError, TypeError ):
809 main.log.exception( "Error parsing pending map" )
810 main.log.error( repr( pendingMap ) )
811
812 intentAddResult = bool( intentAddResult and not missingIntents and
813 installedCheck )
814 if not intentAddResult:
815 main.log.error( "Error in pushing host intents to ONOS" )
816
817 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallf37d44d2017-05-24 10:37:30 -0700818 for j in range( 100 ):
Jon Hall69b2b982016-05-11 12:04:59 -0700819 correct = True
820 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
821 for i in main.activeNodes:
822 onosIds = []
Jon Hallf37d44d2017-05-24 10:37:30 -0700823 ids = main.CLIs[ i ].getAllIntentsId()
Jon Hall69b2b982016-05-11 12:04:59 -0700824 onosIds.append( ids )
Jon Hallf37d44d2017-05-24 10:37:30 -0700825 main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
Jon Hall69b2b982016-05-11 12:04:59 -0700826 str( sorted( onosIds ) ) )
827 if sorted( ids ) != sorted( intentIds ):
828 main.log.warn( "Set of intent IDs doesn't match" )
829 correct = False
830 break
831 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700832 intents = json.loads( main.CLIs[ i ].intents() )
Jon Hall69b2b982016-05-11 12:04:59 -0700833 for intent in intents:
834 if intent[ 'state' ] != "INSTALLED":
835 main.log.warn( "Intent " + intent[ 'id' ] +
836 " is " + intent[ 'state' ] )
837 correct = False
838 break
839 if correct:
840 break
841 else:
Jon Hallf37d44d2017-05-24 10:37:30 -0700842 time.sleep( 1 )
Jon Hall69b2b982016-05-11 12:04:59 -0700843 if not intentStop:
844 intentStop = time.time()
845 global gossipTime
846 gossipTime = intentStop - intentStart
847 main.log.info( "It took about " + str( gossipTime ) +
848 " seconds for all intents to appear in each node" )
849 append = False
850 title = "Gossip Intents"
851 count = 1
852 while append is False:
853 curTitle = title + str( count )
854 if curTitle not in labels:
855 labels.append( curTitle )
856 data.append( str( gossipTime ) )
857 append = True
858 else:
859 count += 1
Jon Hallf37d44d2017-05-24 10:37:30 -0700860 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Jon Hall69b2b982016-05-11 12:04:59 -0700861 maxGossipTime = gossipPeriod * len( main.activeNodes )
862 utilities.assert_greater_equals(
863 expect=maxGossipTime, actual=gossipTime,
864 onpass="ECM anti-entropy for intents worked within " +
865 "expected time",
866 onfail="Intent ECM anti-entropy took too long. " +
867 "Expected time:{}, Actual time:{}".format( maxGossipTime,
868 gossipTime ) )
869 if gossipTime <= maxGossipTime:
870 intentAddResult = True
871
872 if not intentAddResult or "key" in pendingMap:
873 import time
874 installedCheck = True
875 main.log.info( "Sleeping 60 seconds to see if intents are found" )
876 time.sleep( 60 )
877 onosIds = onosCli.getAllIntentsId()
878 main.log.info( "Submitted intents: " + str( intentIds ) )
879 main.log.info( "Intents in ONOS: " + str( onosIds ) )
880 # Print the intent states
881 intents = onosCli.intents()
882 intentStates = []
883 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
884 count = 0
885 try:
886 for intent in json.loads( intents ):
887 # Iter through intents of a node
888 state = intent.get( 'state', None )
889 if "INSTALLED" not in state:
890 installedCheck = False
891 intentId = intent.get( 'id', None )
892 intentStates.append( ( intentId, state ) )
893 except ( ValueError, TypeError ):
894 main.log.exception( "Error parsing intents" )
895 # add submitted intents not in the store
896 tmplist = [ i for i, s in intentStates ]
897 for i in intentIds:
898 if i not in tmplist:
899 intentStates.append( ( i, " - " ) )
900 intentStates.sort()
901 for i, s in intentStates:
902 count += 1
903 main.log.info( "%-6s%-15s%-15s" %
904 ( str( count ), str( i ), str( s ) ) )
905 leaders = onosCli.leaders()
906 try:
907 missing = False
908 if leaders:
909 parsedLeaders = json.loads( leaders )
910 main.log.warn( json.dumps( parsedLeaders,
911 sort_keys=True,
912 indent=4,
913 separators=( ',', ': ' ) ) )
914 # check for all intent partitions
915 # check for election
916 topics = []
917 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700918 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700919 # FIXME: this should only be after we start the app
920 topics.append( "org.onosproject.election" )
921 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -0700922 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -0700923 for topic in topics:
924 if topic not in ONOStopics:
925 main.log.error( "Error: " + topic +
926 " not in leaders" )
927 missing = True
928 else:
929 main.log.error( "leaders() returned None" )
930 except ( ValueError, TypeError ):
931 main.log.exception( "Error parsing leaders" )
932 main.log.error( repr( leaders ) )
933 # Check all nodes
934 if missing:
935 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -0700936 node = main.CLIs[ i ]
937 response = node.leaders( jsonFormat=False )
Jon Hall69b2b982016-05-11 12:04:59 -0700938 main.log.warn( str( node.name ) + " leaders output: \n" +
939 str( response ) )
940
941 partitions = onosCli.partitions()
942 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700943 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -0700944 parsedPartitions = json.loads( partitions )
945 main.log.warn( json.dumps( parsedPartitions,
946 sort_keys=True,
947 indent=4,
948 separators=( ',', ': ' ) ) )
949 # TODO check for a leader in all paritions
950 # TODO check for consistency among nodes
951 else:
952 main.log.error( "partitions() returned None" )
953 except ( ValueError, TypeError ):
954 main.log.exception( "Error parsing partitions" )
955 main.log.error( repr( partitions ) )
956 pendingMap = onosCli.pendingMap()
957 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700958 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -0700959 parsedPending = json.loads( pendingMap )
960 main.log.warn( json.dumps( parsedPending,
961 sort_keys=True,
962 indent=4,
963 separators=( ',', ': ' ) ) )
964 # TODO check something here?
965 else:
966 main.log.error( "pendingMap() returned None" )
967 except ( ValueError, TypeError ):
968 main.log.exception( "Error parsing pending map" )
969 main.log.error( repr( pendingMap ) )
970
971 def CASE4( self, main ):
972 """
973 Ping across added host intents
974 """
975 import json
976 import time
977 assert main.numCtrls, "main.numCtrls not defined"
978 assert main, "main not defined"
979 assert utilities.assert_equals, "utilities.assert_equals not defined"
980 assert main.CLIs, "main.CLIs not defined"
981 assert main.nodes, "main.nodes not defined"
982 main.case( "Verify connectivity by sending traffic across Intents" )
983 main.caseExplanation = "Ping across added host intents to check " +\
984 "functionality and check the state of " +\
985 "the intent"
986
Jon Hallf37d44d2017-05-24 10:37:30 -0700987 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -0700988 main.step( "Check Intent state" )
989 installedCheck = False
990 loopCount = 0
991 while not installedCheck and loopCount < 40:
992 installedCheck = True
993 # Print the intent states
994 intents = onosCli.intents()
995 intentStates = []
996 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
997 count = 0
998 # Iter through intents of a node
999 try:
1000 for intent in json.loads( intents ):
1001 state = intent.get( 'state', None )
1002 if "INSTALLED" not in state:
1003 installedCheck = False
1004 intentId = intent.get( 'id', None )
1005 intentStates.append( ( intentId, state ) )
1006 except ( ValueError, TypeError ):
1007 main.log.exception( "Error parsing intents." )
1008 # Print states
1009 intentStates.sort()
1010 for i, s in intentStates:
1011 count += 1
1012 main.log.info( "%-6s%-15s%-15s" %
1013 ( str( count ), str( i ), str( s ) ) )
1014 if not installedCheck:
1015 time.sleep( 1 )
1016 loopCount += 1
1017 utilities.assert_equals( expect=True, actual=installedCheck,
1018 onpass="Intents are all INSTALLED",
1019 onfail="Intents are not all in " +
1020 "INSTALLED state" )
1021
1022 main.step( "Ping across added host intents" )
1023 PingResult = main.TRUE
1024 for i in range( 8, 18 ):
1025 ping = main.Mininet1.pingHost( src="h" + str( i ),
1026 target="h" + str( i + 10 ) )
1027 PingResult = PingResult and ping
1028 if ping == main.FALSE:
1029 main.log.warn( "Ping failed between h" + str( i ) +
1030 " and h" + str( i + 10 ) )
1031 elif ping == main.TRUE:
1032 main.log.info( "Ping test passed!" )
1033 # Don't set PingResult or you'd override failures
1034 if PingResult == main.FALSE:
1035 main.log.error(
1036 "Intents have not been installed correctly, pings failed." )
1037 # TODO: pretty print
1038 main.log.warn( "ONOS1 intents: " )
1039 try:
1040 tmpIntents = onosCli.intents()
1041 main.log.warn( json.dumps( json.loads( tmpIntents ),
1042 sort_keys=True,
1043 indent=4,
1044 separators=( ',', ': ' ) ) )
1045 except ( ValueError, TypeError ):
1046 main.log.warn( repr( tmpIntents ) )
1047 utilities.assert_equals(
1048 expect=main.TRUE,
1049 actual=PingResult,
1050 onpass="Intents have been installed correctly and pings work",
1051 onfail="Intents have not been installed correctly, pings failed." )
1052
1053 main.step( "Check leadership of topics" )
1054 leaders = onosCli.leaders()
1055 topicCheck = main.TRUE
1056 try:
1057 if leaders:
1058 parsedLeaders = json.loads( leaders )
1059 main.log.warn( json.dumps( parsedLeaders,
1060 sort_keys=True,
1061 indent=4,
1062 separators=( ',', ': ' ) ) )
1063 # check for all intent partitions
1064 # check for election
1065 # TODO: Look at Devices as topics now that it uses this system
1066 topics = []
1067 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001068 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001069 # FIXME: this should only be after we start the app
1070 # FIXME: topics.append( "org.onosproject.election" )
1071 # Print leaders output
1072 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001073 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -07001074 for topic in topics:
1075 if topic not in ONOStopics:
1076 main.log.error( "Error: " + topic +
1077 " not in leaders" )
1078 topicCheck = main.FALSE
1079 else:
1080 main.log.error( "leaders() returned None" )
1081 topicCheck = main.FALSE
1082 except ( ValueError, TypeError ):
1083 topicCheck = main.FALSE
1084 main.log.exception( "Error parsing leaders" )
1085 main.log.error( repr( leaders ) )
1086 # TODO: Check for a leader of these topics
1087 # Check all nodes
1088 if topicCheck:
1089 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001090 node = main.CLIs[ i ]
1091 response = node.leaders( jsonFormat=False )
Jon Hall69b2b982016-05-11 12:04:59 -07001092 main.log.warn( str( node.name ) + " leaders output: \n" +
1093 str( response ) )
1094
1095 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1096 onpass="intent Partitions is in leaders",
1097 onfail="Some topics were lost " )
1098 # Print partitions
1099 partitions = onosCli.partitions()
1100 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001101 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -07001102 parsedPartitions = json.loads( partitions )
1103 main.log.warn( json.dumps( parsedPartitions,
1104 sort_keys=True,
1105 indent=4,
1106 separators=( ',', ': ' ) ) )
1107 # TODO check for a leader in all paritions
1108 # TODO check for consistency among nodes
1109 else:
1110 main.log.error( "partitions() returned None" )
1111 except ( ValueError, TypeError ):
1112 main.log.exception( "Error parsing partitions" )
1113 main.log.error( repr( partitions ) )
1114 # Print Pending Map
1115 pendingMap = onosCli.pendingMap()
1116 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001117 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -07001118 parsedPending = json.loads( pendingMap )
1119 main.log.warn( json.dumps( parsedPending,
1120 sort_keys=True,
1121 indent=4,
1122 separators=( ',', ': ' ) ) )
1123 # TODO check something here?
1124 else:
1125 main.log.error( "pendingMap() returned None" )
1126 except ( ValueError, TypeError ):
1127 main.log.exception( "Error parsing pending map" )
1128 main.log.error( repr( pendingMap ) )
1129
1130 if not installedCheck:
1131 main.log.info( "Waiting 60 seconds to see if the state of " +
1132 "intents change" )
1133 time.sleep( 60 )
1134 # Print the intent states
1135 intents = onosCli.intents()
1136 intentStates = []
1137 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1138 count = 0
1139 # Iter through intents of a node
1140 try:
1141 for intent in json.loads( intents ):
1142 state = intent.get( 'state', None )
1143 if "INSTALLED" not in state:
1144 installedCheck = False
1145 intentId = intent.get( 'id', None )
1146 intentStates.append( ( intentId, state ) )
1147 except ( ValueError, TypeError ):
1148 main.log.exception( "Error parsing intents." )
1149 intentStates.sort()
1150 for i, s in intentStates:
1151 count += 1
1152 main.log.info( "%-6s%-15s%-15s" %
1153 ( str( count ), str( i ), str( s ) ) )
1154 leaders = onosCli.leaders()
1155 try:
1156 missing = False
1157 if leaders:
1158 parsedLeaders = json.loads( leaders )
1159 main.log.warn( json.dumps( parsedLeaders,
1160 sort_keys=True,
1161 indent=4,
1162 separators=( ',', ': ' ) ) )
1163 # check for all intent partitions
1164 # check for election
1165 topics = []
1166 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -07001167 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001168 # FIXME: this should only be after we start the app
1169 topics.append( "org.onosproject.election" )
1170 main.log.debug( topics )
Jon Hallf37d44d2017-05-24 10:37:30 -07001171 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
Jon Hall69b2b982016-05-11 12:04:59 -07001172 for topic in topics:
1173 if topic not in ONOStopics:
1174 main.log.error( "Error: " + topic +
1175 " not in leaders" )
1176 missing = True
1177 else:
1178 main.log.error( "leaders() returned None" )
1179 except ( ValueError, TypeError ):
1180 main.log.exception( "Error parsing leaders" )
1181 main.log.error( repr( leaders ) )
1182 if missing:
1183 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001184 node = main.CLIs[ i ]
1185 response = node.leaders( jsonFormat=False )
Jon Hall69b2b982016-05-11 12:04:59 -07001186 main.log.warn( str( node.name ) + " leaders output: \n" +
1187 str( response ) )
1188
1189 partitions = onosCli.partitions()
1190 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001191 if partitions:
Jon Hall69b2b982016-05-11 12:04:59 -07001192 parsedPartitions = json.loads( partitions )
1193 main.log.warn( json.dumps( parsedPartitions,
1194 sort_keys=True,
1195 indent=4,
1196 separators=( ',', ': ' ) ) )
1197 # TODO check for a leader in all paritions
1198 # TODO check for consistency among nodes
1199 else:
1200 main.log.error( "partitions() returned None" )
1201 except ( ValueError, TypeError ):
1202 main.log.exception( "Error parsing partitions" )
1203 main.log.error( repr( partitions ) )
1204 pendingMap = onosCli.pendingMap()
1205 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07001206 if pendingMap:
Jon Hall69b2b982016-05-11 12:04:59 -07001207 parsedPending = json.loads( pendingMap )
1208 main.log.warn( json.dumps( parsedPending,
1209 sort_keys=True,
1210 indent=4,
1211 separators=( ',', ': ' ) ) )
1212 # TODO check something here?
1213 else:
1214 main.log.error( "pendingMap() returned None" )
1215 except ( ValueError, TypeError ):
1216 main.log.exception( "Error parsing pending map" )
1217 main.log.error( repr( pendingMap ) )
1218 # Print flowrules
Jon Hallf37d44d2017-05-24 10:37:30 -07001219 node = main.activeNodes[ 0 ]
1220 main.log.debug( main.CLIs[ node ].flows( jsonFormat=False ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001221 main.step( "Wait a minute then ping again" )
1222 # the wait is above
1223 PingResult = main.TRUE
1224 for i in range( 8, 18 ):
1225 ping = main.Mininet1.pingHost( src="h" + str( i ),
1226 target="h" + str( i + 10 ) )
1227 PingResult = PingResult and ping
1228 if ping == main.FALSE:
1229 main.log.warn( "Ping failed between h" + str( i ) +
1230 " and h" + str( i + 10 ) )
1231 elif ping == main.TRUE:
1232 main.log.info( "Ping test passed!" )
1233 # Don't set PingResult or you'd override failures
1234 if PingResult == main.FALSE:
1235 main.log.error(
1236 "Intents have not been installed correctly, pings failed." )
1237 # TODO: pretty print
1238 main.log.warn( "ONOS1 intents: " )
1239 try:
1240 tmpIntents = onosCli.intents()
1241 main.log.warn( json.dumps( json.loads( tmpIntents ),
1242 sort_keys=True,
1243 indent=4,
1244 separators=( ',', ': ' ) ) )
1245 except ( ValueError, TypeError ):
1246 main.log.warn( repr( tmpIntents ) )
1247 utilities.assert_equals(
1248 expect=main.TRUE,
1249 actual=PingResult,
1250 onpass="Intents have been installed correctly and pings work",
1251 onfail="Intents have not been installed correctly, pings failed." )
1252
1253 def CASE5( self, main ):
1254 """
1255 Reading state of ONOS
1256 """
1257 import json
1258 import time
1259 assert main.numCtrls, "main.numCtrls not defined"
1260 assert main, "main not defined"
1261 assert utilities.assert_equals, "utilities.assert_equals not defined"
1262 assert main.CLIs, "main.CLIs not defined"
1263 assert main.nodes, "main.nodes not defined"
1264
1265 main.case( "Setting up and gathering data for current state" )
1266 # The general idea for this test case is to pull the state of
1267 # ( intents,flows, topology,... ) from each ONOS node
1268 # We can then compare them with each other and also with past states
1269
1270 main.step( "Check that each switch has a master" )
1271 global mastershipState
1272 mastershipState = '[]'
1273
1274 # Assert that each device has a master
1275 rolesNotNull = main.TRUE
1276 threads = []
1277 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001278 t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
Jon Hall69b2b982016-05-11 12:04:59 -07001279 name="rolesNotNull-" + str( i ),
1280 args=[] )
1281 threads.append( t )
1282 t.start()
1283
1284 for t in threads:
1285 t.join()
1286 rolesNotNull = rolesNotNull and t.result
1287 utilities.assert_equals(
1288 expect=main.TRUE,
1289 actual=rolesNotNull,
1290 onpass="Each device has a master",
1291 onfail="Some devices don't have a master assigned" )
1292
1293 main.step( "Get the Mastership of each switch from each controller" )
1294 ONOSMastership = []
1295 consistentMastership = True
1296 rolesResults = True
1297 threads = []
1298 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001299 t = main.Thread( target=main.CLIs[ i ].roles,
Jon Hall69b2b982016-05-11 12:04:59 -07001300 name="roles-" + str( i ),
1301 args=[] )
1302 threads.append( t )
1303 t.start()
1304
1305 for t in threads:
1306 t.join()
1307 ONOSMastership.append( t.result )
1308
1309 for i in range( len( ONOSMastership ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001310 node = str( main.activeNodes[ i ] + 1 )
1311 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hall69b2b982016-05-11 12:04:59 -07001312 main.log.error( "Error in getting ONOS" + node + " roles" )
1313 main.log.warn( "ONOS" + node + " mastership response: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001314 repr( ONOSMastership[ i ] ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001315 rolesResults = False
1316 utilities.assert_equals(
1317 expect=True,
1318 actual=rolesResults,
1319 onpass="No error in reading roles output",
1320 onfail="Error in reading roles from ONOS" )
1321
1322 main.step( "Check for consistency in roles from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001323 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
Jon Hall69b2b982016-05-11 12:04:59 -07001324 main.log.info(
1325 "Switch roles are consistent across all ONOS nodes" )
1326 else:
1327 consistentMastership = False
1328 utilities.assert_equals(
1329 expect=True,
1330 actual=consistentMastership,
1331 onpass="Switch roles are consistent across all ONOS nodes",
1332 onfail="ONOS nodes have different views of switch roles" )
1333
1334 if rolesResults and not consistentMastership:
1335 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001336 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001337 try:
1338 main.log.warn(
1339 "ONOS" + node + " roles: ",
1340 json.dumps(
1341 json.loads( ONOSMastership[ i ] ),
1342 sort_keys=True,
1343 indent=4,
1344 separators=( ',', ': ' ) ) )
1345 except ( ValueError, TypeError ):
1346 main.log.warn( repr( ONOSMastership[ i ] ) )
1347 elif rolesResults and consistentMastership:
1348 mastershipState = ONOSMastership[ 0 ]
1349
1350 main.step( "Get the intents from each controller" )
1351 global intentState
1352 intentState = []
1353 ONOSIntents = []
1354 consistentIntents = True # Are Intents consistent across nodes?
1355 intentsResults = True # Could we read Intents from ONOS?
1356 threads = []
1357 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001358 t = main.Thread( target=main.CLIs[ i ].intents,
Jon Hall69b2b982016-05-11 12:04:59 -07001359 name="intents-" + str( i ),
1360 args=[],
1361 kwargs={ 'jsonFormat': True } )
1362 threads.append( t )
1363 t.start()
1364
1365 for t in threads:
1366 t.join()
1367 ONOSIntents.append( t.result )
1368
1369 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001370 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001371 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1372 main.log.error( "Error in getting ONOS" + node + " intents" )
1373 main.log.warn( "ONOS" + node + " intents response: " +
1374 repr( ONOSIntents[ i ] ) )
1375 intentsResults = False
1376 utilities.assert_equals(
1377 expect=True,
1378 actual=intentsResults,
1379 onpass="No error in reading intents output",
1380 onfail="Error in reading intents from ONOS" )
1381
1382 main.step( "Check for consistency in Intents from each controller" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001383 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
Jon Hall69b2b982016-05-11 12:04:59 -07001384 main.log.info( "Intents are consistent across all ONOS " +
1385 "nodes" )
1386 else:
1387 consistentIntents = False
1388 main.log.error( "Intents not consistent" )
1389 utilities.assert_equals(
1390 expect=True,
1391 actual=consistentIntents,
1392 onpass="Intents are consistent across all ONOS nodes",
1393 onfail="ONOS nodes have different views of intents" )
1394
1395 if intentsResults:
1396 # Try to make it easy to figure out what is happening
1397 #
1398 # Intent ONOS1 ONOS2 ...
1399 # 0x01 INSTALLED INSTALLING
1400 # ... ... ...
1401 # ... ... ...
1402 title = " Id"
1403 for n in main.activeNodes:
1404 title += " " * 10 + "ONOS" + str( n + 1 )
1405 main.log.warn( title )
1406 # get all intent keys in the cluster
1407 keys = []
1408 try:
1409 # Get the set of all intent keys
1410 for nodeStr in ONOSIntents:
1411 node = json.loads( nodeStr )
1412 for intent in node:
1413 keys.append( intent.get( 'id' ) )
1414 keys = set( keys )
1415 # For each intent key, print the state on each node
1416 for key in keys:
1417 row = "%-13s" % key
1418 for nodeStr in ONOSIntents:
1419 node = json.loads( nodeStr )
1420 for intent in node:
1421 if intent.get( 'id', "Error" ) == key:
1422 row += "%-15s" % intent.get( 'state' )
1423 main.log.warn( row )
1424 # End of intent state table
1425 except ValueError as e:
1426 main.log.exception( e )
1427 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1428
1429 if intentsResults and not consistentIntents:
1430 # print the json objects
Jon Hallf37d44d2017-05-24 10:37:30 -07001431 n = str( main.activeNodes[ -1 ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001432 main.log.debug( "ONOS" + n + " intents: " )
1433 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1434 sort_keys=True,
1435 indent=4,
1436 separators=( ',', ': ' ) ) )
1437 for i in range( len( ONOSIntents ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001438 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001439 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1440 main.log.debug( "ONOS" + node + " intents: " )
Jon Hallf37d44d2017-05-24 10:37:30 -07001441 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
Jon Hall69b2b982016-05-11 12:04:59 -07001442 sort_keys=True,
1443 indent=4,
1444 separators=( ',', ': ' ) ) )
1445 else:
1446 main.log.debug( "ONOS" + node + " intents match ONOS" +
1447 n + " intents" )
1448 elif intentsResults and consistentIntents:
1449 intentState = ONOSIntents[ 0 ]
1450
1451 main.step( "Get the flows from each controller" )
1452 global flowState
1453 flowState = []
1454 ONOSFlows = []
1455 ONOSFlowsJson = []
1456 flowCheck = main.FALSE
1457 consistentFlows = True
1458 flowsResults = True
1459 threads = []
1460 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001461 t = main.Thread( target=main.CLIs[ i ].flows,
Jon Hall69b2b982016-05-11 12:04:59 -07001462 name="flows-" + str( i ),
1463 args=[],
1464 kwargs={ 'jsonFormat': True } )
1465 threads.append( t )
1466 t.start()
1467
1468 # NOTE: Flows command can take some time to run
Jon Hallf37d44d2017-05-24 10:37:30 -07001469 time.sleep( 30 )
Jon Hall69b2b982016-05-11 12:04:59 -07001470 for t in threads:
1471 t.join()
1472 result = t.result
1473 ONOSFlows.append( result )
1474
1475 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001476 num = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001477 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1478 main.log.error( "Error in getting ONOS" + num + " flows" )
1479 main.log.warn( "ONOS" + num + " flows response: " +
1480 repr( ONOSFlows[ i ] ) )
1481 flowsResults = False
1482 ONOSFlowsJson.append( None )
1483 else:
1484 try:
1485 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1486 except ( ValueError, TypeError ):
1487 # FIXME: change this to log.error?
1488 main.log.exception( "Error in parsing ONOS" + num +
1489 " response as json." )
1490 main.log.error( repr( ONOSFlows[ i ] ) )
1491 ONOSFlowsJson.append( None )
1492 flowsResults = False
1493 utilities.assert_equals(
1494 expect=True,
1495 actual=flowsResults,
1496 onpass="No error in reading flows output",
1497 onfail="Error in reading flows from ONOS" )
1498
1499 main.step( "Check for consistency in Flows from each controller" )
1500 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1501 if all( tmp ):
1502 main.log.info( "Flow count is consistent across all ONOS nodes" )
1503 else:
1504 consistentFlows = False
1505 utilities.assert_equals(
1506 expect=True,
1507 actual=consistentFlows,
1508 onpass="The flow count is consistent across all ONOS nodes",
1509 onfail="ONOS nodes have different flow counts" )
1510
1511 if flowsResults and not consistentFlows:
1512 for i in range( len( ONOSFlows ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001513 node = str( main.activeNodes[ i ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001514 try:
1515 main.log.warn(
1516 "ONOS" + node + " flows: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07001517 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
Jon Hall69b2b982016-05-11 12:04:59 -07001518 indent=4, separators=( ',', ': ' ) ) )
1519 except ( ValueError, TypeError ):
1520 main.log.warn( "ONOS" + node + " flows: " +
1521 repr( ONOSFlows[ i ] ) )
1522 elif flowsResults and consistentFlows:
1523 flowCheck = main.TRUE
1524 flowState = ONOSFlows[ 0 ]
1525
1526 main.step( "Get the OF Table entries" )
1527 global flows
1528 flows = []
1529 for i in range( 1, 29 ):
1530 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1531 if flowCheck == main.FALSE:
1532 for table in flows:
1533 main.log.warn( table )
1534 # TODO: Compare switch flow tables with ONOS flow tables
1535
1536 main.step( "Start continuous pings" )
1537 main.Mininet2.pingLong(
1538 src=main.params[ 'PING' ][ 'source1' ],
1539 target=main.params[ 'PING' ][ 'target1' ],
1540 pingTime=500 )
1541 main.Mininet2.pingLong(
1542 src=main.params[ 'PING' ][ 'source2' ],
1543 target=main.params[ 'PING' ][ 'target2' ],
1544 pingTime=500 )
1545 main.Mininet2.pingLong(
1546 src=main.params[ 'PING' ][ 'source3' ],
1547 target=main.params[ 'PING' ][ 'target3' ],
1548 pingTime=500 )
1549 main.Mininet2.pingLong(
1550 src=main.params[ 'PING' ][ 'source4' ],
1551 target=main.params[ 'PING' ][ 'target4' ],
1552 pingTime=500 )
1553 main.Mininet2.pingLong(
1554 src=main.params[ 'PING' ][ 'source5' ],
1555 target=main.params[ 'PING' ][ 'target5' ],
1556 pingTime=500 )
1557 main.Mininet2.pingLong(
1558 src=main.params[ 'PING' ][ 'source6' ],
1559 target=main.params[ 'PING' ][ 'target6' ],
1560 pingTime=500 )
1561 main.Mininet2.pingLong(
1562 src=main.params[ 'PING' ][ 'source7' ],
1563 target=main.params[ 'PING' ][ 'target7' ],
1564 pingTime=500 )
1565 main.Mininet2.pingLong(
1566 src=main.params[ 'PING' ][ 'source8' ],
1567 target=main.params[ 'PING' ][ 'target8' ],
1568 pingTime=500 )
1569 main.Mininet2.pingLong(
1570 src=main.params[ 'PING' ][ 'source9' ],
1571 target=main.params[ 'PING' ][ 'target9' ],
1572 pingTime=500 )
1573 main.Mininet2.pingLong(
1574 src=main.params[ 'PING' ][ 'source10' ],
1575 target=main.params[ 'PING' ][ 'target10' ],
1576 pingTime=500 )
1577
1578 main.step( "Collecting topology information from ONOS" )
1579 devices = []
1580 threads = []
1581 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001582 t = main.Thread( target=main.CLIs[ i ].devices,
Jon Hall69b2b982016-05-11 12:04:59 -07001583 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001584 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001585 threads.append( t )
1586 t.start()
1587
1588 for t in threads:
1589 t.join()
1590 devices.append( t.result )
1591 hosts = []
1592 threads = []
1593 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001594 t = main.Thread( target=main.CLIs[ i ].hosts,
Jon Hall69b2b982016-05-11 12:04:59 -07001595 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001596 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001597 threads.append( t )
1598 t.start()
1599
1600 for t in threads:
1601 t.join()
1602 try:
1603 hosts.append( json.loads( t.result ) )
1604 except ( ValueError, TypeError ):
1605 # FIXME: better handling of this, print which node
1606 # Maybe use thread name?
1607 main.log.exception( "Error parsing json output of hosts" )
1608 main.log.warn( repr( t.result ) )
1609 hosts.append( None )
1610
1611 ports = []
1612 threads = []
1613 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001614 t = main.Thread( target=main.CLIs[ i ].ports,
Jon Hall69b2b982016-05-11 12:04:59 -07001615 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001616 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001617 threads.append( t )
1618 t.start()
1619
1620 for t in threads:
1621 t.join()
1622 ports.append( t.result )
1623 links = []
1624 threads = []
1625 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001626 t = main.Thread( target=main.CLIs[ i ].links,
Jon Hall69b2b982016-05-11 12:04:59 -07001627 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001628 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001629 threads.append( t )
1630 t.start()
1631
1632 for t in threads:
1633 t.join()
1634 links.append( t.result )
1635 clusters = []
1636 threads = []
1637 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001638 t = main.Thread( target=main.CLIs[ i ].clusters,
Jon Hall69b2b982016-05-11 12:04:59 -07001639 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001640 args=[] )
Jon Hall69b2b982016-05-11 12:04:59 -07001641 threads.append( t )
1642 t.start()
1643
1644 for t in threads:
1645 t.join()
1646 clusters.append( t.result )
1647 # Compare json objects for hosts and dataplane clusters
1648
1649 # hosts
1650 main.step( "Host view is consistent across ONOS nodes" )
1651 consistentHostsResult = main.TRUE
1652 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001653 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001654 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1655 if hosts[ controller ] == hosts[ 0 ]:
1656 continue
1657 else: # hosts not consistent
1658 main.log.error( "hosts from ONOS" +
1659 controllerStr +
1660 " is inconsistent with ONOS1" )
1661 main.log.warn( repr( hosts[ controller ] ) )
1662 consistentHostsResult = main.FALSE
1663
1664 else:
1665 main.log.error( "Error in getting ONOS hosts from ONOS" +
1666 controllerStr )
1667 consistentHostsResult = main.FALSE
1668 main.log.warn( "ONOS" + controllerStr +
1669 " hosts response: " +
1670 repr( hosts[ controller ] ) )
1671 utilities.assert_equals(
1672 expect=main.TRUE,
1673 actual=consistentHostsResult,
1674 onpass="Hosts view is consistent across all ONOS nodes",
1675 onfail="ONOS nodes have different views of hosts" )
1676
1677 main.step( "Each host has an IP address" )
1678 ipResult = main.TRUE
1679 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001680 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001681 if hosts[ controller ]:
1682 for host in hosts[ controller ]:
Jon Hallf37d44d2017-05-24 10:37:30 -07001683 if not host.get( 'ipAddresses', [] ):
Jon Hall69b2b982016-05-11 12:04:59 -07001684 main.log.error( "Error with host ips on controller" +
1685 controllerStr + ": " + str( host ) )
1686 ipResult = main.FALSE
1687 utilities.assert_equals(
1688 expect=main.TRUE,
1689 actual=ipResult,
1690 onpass="The ips of the hosts aren't empty",
1691 onfail="The ip of at least one host is missing" )
1692
1693 # Strongly connected clusters of devices
1694 main.step( "Cluster view is consistent across ONOS nodes" )
1695 consistentClustersResult = main.TRUE
1696 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001697 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001698 if "Error" not in clusters[ controller ]:
1699 if clusters[ controller ] == clusters[ 0 ]:
1700 continue
1701 else: # clusters not consistent
1702 main.log.error( "clusters from ONOS" + controllerStr +
1703 " is inconsistent with ONOS1" )
1704 consistentClustersResult = main.FALSE
1705
1706 else:
1707 main.log.error( "Error in getting dataplane clusters " +
1708 "from ONOS" + controllerStr )
1709 consistentClustersResult = main.FALSE
1710 main.log.warn( "ONOS" + controllerStr +
1711 " clusters response: " +
1712 repr( clusters[ controller ] ) )
1713 utilities.assert_equals(
1714 expect=main.TRUE,
1715 actual=consistentClustersResult,
1716 onpass="Clusters view is consistent across all ONOS nodes",
1717 onfail="ONOS nodes have different views of clusters" )
1718 if not consistentClustersResult:
1719 main.log.debug( clusters )
1720
1721 # there should always only be one cluster
1722 main.step( "Cluster view correct across ONOS nodes" )
1723 try:
1724 numClusters = len( json.loads( clusters[ 0 ] ) )
1725 except ( ValueError, TypeError ):
1726 main.log.exception( "Error parsing clusters[0]: " +
1727 repr( clusters[ 0 ] ) )
1728 numClusters = "ERROR"
1729 utilities.assert_equals(
1730 expect=1,
1731 actual=numClusters,
1732 onpass="ONOS shows 1 SCC",
1733 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1734
1735 main.step( "Comparing ONOS topology to MN" )
1736 devicesResults = main.TRUE
1737 linksResults = main.TRUE
1738 hostsResults = main.TRUE
1739 mnSwitches = main.Mininet1.getSwitches()
1740 mnLinks = main.Mininet1.getLinks()
1741 mnHosts = main.Mininet1.getHosts()
1742 for controller in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001743 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07001744 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07001745 "Error" not in devices[ controller ] and\
1746 "Error" not in ports[ controller ]:
1747 currentDevicesResult = main.Mininet1.compareSwitches(
1748 mnSwitches,
1749 json.loads( devices[ controller ] ),
1750 json.loads( ports[ controller ] ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001751 else:
1752 currentDevicesResult = main.FALSE
1753 utilities.assert_equals( expect=main.TRUE,
1754 actual=currentDevicesResult,
1755 onpass="ONOS" + controllerStr +
1756 " Switches view is correct",
1757 onfail="ONOS" + controllerStr +
1758 " Switches view is incorrect" )
1759 if links[ controller ] and "Error" not in links[ controller ]:
1760 currentLinksResult = main.Mininet1.compareLinks(
1761 mnSwitches, mnLinks,
1762 json.loads( links[ controller ] ) )
1763 else:
1764 currentLinksResult = main.FALSE
1765 utilities.assert_equals( expect=main.TRUE,
1766 actual=currentLinksResult,
1767 onpass="ONOS" + controllerStr +
1768 " links view is correct",
1769 onfail="ONOS" + controllerStr +
1770 " links view is incorrect" )
1771
1772 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1773 currentHostsResult = main.Mininet1.compareHosts(
1774 mnHosts,
1775 hosts[ controller ] )
1776 else:
1777 currentHostsResult = main.FALSE
1778 utilities.assert_equals( expect=main.TRUE,
1779 actual=currentHostsResult,
1780 onpass="ONOS" + controllerStr +
1781 " hosts exist in Mininet",
1782 onfail="ONOS" + controllerStr +
1783 " hosts don't match Mininet" )
1784
1785 devicesResults = devicesResults and currentDevicesResult
1786 linksResults = linksResults and currentLinksResult
1787 hostsResults = hostsResults and currentHostsResult
1788
1789 main.step( "Device information is correct" )
1790 utilities.assert_equals(
1791 expect=main.TRUE,
1792 actual=devicesResults,
1793 onpass="Device information is correct",
1794 onfail="Device information is incorrect" )
1795
1796 main.step( "Links are correct" )
1797 utilities.assert_equals(
1798 expect=main.TRUE,
1799 actual=linksResults,
1800 onpass="Link are correct",
1801 onfail="Links are incorrect" )
1802
1803 main.step( "Hosts are correct" )
1804 utilities.assert_equals(
1805 expect=main.TRUE,
1806 actual=hostsResults,
1807 onpass="Hosts are correct",
1808 onfail="Hosts are incorrect" )
1809
1810 def CASE6( self, main ):
1811 """
1812 The Scaling case.
1813 """
1814 import time
1815 import re
1816 assert main.numCtrls, "main.numCtrls not defined"
1817 assert main, "main not defined"
1818 assert utilities.assert_equals, "utilities.assert_equals not defined"
1819 assert main.CLIs, "main.CLIs not defined"
1820 assert main.nodes, "main.nodes not defined"
1821 try:
1822 labels
1823 except NameError:
1824 main.log.error( "labels not defined, setting to []" )
1825 global labels
1826 labels = []
1827 try:
1828 data
1829 except NameError:
1830 main.log.error( "data not defined, setting to []" )
1831 global data
1832 data = []
1833
1834 main.case( "Swap some of the ONOS nodes" )
1835
1836 main.step( "Checking ONOS Logs for errors" )
1837 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001838 node = main.nodes[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -07001839 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1840 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1841
1842 main.step( "Generate new metadata file" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001843 old = [ main.activeNodes[ 1 ], main.activeNodes[ -2 ] ]
1844 new = range( main.ONOSbench.maxNodes )[ -2: ]
Jon Hall69b2b982016-05-11 12:04:59 -07001845 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1846 handle = main.ONOSbench.handle
1847 for x, y in zip( old, new ):
1848 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1849 handle.expect( "\$" ) # from the variable
1850 ret = handle.before
1851 handle.expect( "\$" ) # From the prompt
1852 ret += handle.before
1853 main.log.debug( ret )
1854 main.activeNodes.remove( x )
1855 main.activeNodes.append( y )
1856
1857 genResult = main.Server.generateFile( main.numCtrls )
1858 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1859 onpass="New cluster metadata file generated",
1860 onfail="Failled to generate new metadata file" )
1861 time.sleep( 5 ) # Give time for nodes to read new file
1862
1863 main.step( "Start new nodes" ) # OR stop old nodes?
1864 started = main.TRUE
1865 for i in new:
Jon Hallf37d44d2017-05-24 10:37:30 -07001866 started = main.ONOSbench.onosStart( main.nodes[ i ].ip_address ) and main.TRUE
Jon Hall69b2b982016-05-11 12:04:59 -07001867 utilities.assert_equals( expect=main.TRUE, actual=started,
1868 onpass="ONOS started",
1869 onfail="ONOS start NOT successful" )
1870
1871 main.step( "Checking if ONOS is up yet" )
1872 for i in range( 2 ):
1873 onosIsupResult = main.TRUE
1874 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001875 node = main.nodes[ i ]
Jon Hall168c1862017-01-31 17:35:34 -08001876 main.ONOSbench.onosSecureSSH( node=node.ip_address )
Jon Hall69b2b982016-05-11 12:04:59 -07001877 started = main.ONOSbench.isup( node.ip_address )
1878 if not started:
1879 main.log.error( node.name + " didn't start!" )
1880 onosIsupResult = onosIsupResult and started
1881 if onosIsupResult == main.TRUE:
1882 break
1883 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1884 onpass="ONOS started",
1885 onfail="ONOS start NOT successful" )
1886
Jon Hall6509dbf2016-06-21 17:01:17 -07001887 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -07001888 cliResults = main.TRUE
1889 threads = []
1890 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001891 t = main.Thread( target=main.CLIs[ i ].startOnosCli,
Jon Hall69b2b982016-05-11 12:04:59 -07001892 name="startOnosCli-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07001893 args=[ main.nodes[ i ].ip_address ] )
Jon Hall69b2b982016-05-11 12:04:59 -07001894 threads.append( t )
1895 t.start()
1896
1897 for t in threads:
1898 t.join()
1899 cliResults = cliResults and t.result
1900 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1901 onpass="ONOS cli started",
1902 onfail="ONOS clis did not start" )
1903
1904 main.step( "Checking ONOS nodes" )
1905 nodeResults = utilities.retry( main.HA.nodesCheck,
1906 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07001907 args=[ main.activeNodes ],
Jon Hall69b2b982016-05-11 12:04:59 -07001908 attempts=5 )
1909 utilities.assert_equals( expect=True, actual=nodeResults,
1910 onpass="Nodes check successful",
1911 onfail="Nodes check NOT successful" )
1912
1913 for i in range( 10 ):
1914 ready = True
1915 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001916 cli = main.CLIs[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -07001917 output = cli.summary()
1918 if not output:
1919 ready = False
1920 if ready:
1921 break
1922 time.sleep( 30 )
1923 utilities.assert_equals( expect=True, actual=ready,
1924 onpass="ONOS summary command succeded",
1925 onfail="ONOS summary command failed" )
1926 if not ready:
1927 main.cleanup()
1928 main.exit()
1929
1930 # Rerun for election on new nodes
1931 runResults = main.TRUE
1932 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001933 cli = main.CLIs[ i ]
Jon Hall69b2b982016-05-11 12:04:59 -07001934 run = cli.electionTestRun()
1935 if run != main.TRUE:
1936 main.log.error( "Error running for election on " + cli.name )
1937 runResults = runResults and run
1938 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1939 onpass="Reran for election",
1940 onfail="Failed to rerun for election" )
1941
1942 for node in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001943 main.log.warn( "\n****************** {} **************".format( main.nodes[ node ].ip_address ) )
1944 main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
1945 main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
1946 main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
1947 main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )
Jon Hall69b2b982016-05-11 12:04:59 -07001948
1949 main.step( "Reapplying cell variable to environment" )
1950 cellName = main.params[ 'ENV' ][ 'cellName' ]
1951 cellResult = main.ONOSbench.setCell( cellName )
1952 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1953 onpass="Set cell successfull",
1954 onfail="Failled to set cell" )
1955
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that the node swap did not disturb the control plane or
        data plane: every switch still has a master, device roles and
        intents are consistent across the remaining nodes and unchanged
        from before the scaling, the OpenFlow tables match the pre-scaling
        snapshot, and leadership election is still functional.

        NOTE(review): relies on globals set by earlier cases -- intentState
        (saved intents) and flows (per-switch flow-table snapshots);
        confirm those cases ran before this one.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        # Collect the mastership (roles) view from each active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains an error
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report the same mastership view as the first node
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's roles for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        # Collect the intents (as JSON) from each active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents output is empty or contains an error
        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted to ignore ordering differences between nodes
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same count but different text: compare intent by intent
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[ 0 ] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before scaling",
                onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # flows is presumably the per-switch snapshot saved before scaling
        # (NOTE(review): set in an earlier case -- confirm)
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        # The continuous-ping loss check below is disabled; kept for reference
        """
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        """
        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        # Ask every active node who it thinks the election-app leader is
        for i in main.activeNodes:
            cli = main.CLIs[ i ]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # Every node must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2263
2264 def CASE8( self, main ):
2265 """
2266 Compare topo
2267 """
2268 import json
2269 import time
2270 assert main.numCtrls, "main.numCtrls not defined"
2271 assert main, "main not defined"
2272 assert utilities.assert_equals, "utilities.assert_equals not defined"
2273 assert main.CLIs, "main.CLIs not defined"
2274 assert main.nodes, "main.nodes not defined"
2275
2276 main.case( "Compare ONOS Topology view to Mininet topology" )
2277 main.caseExplanation = "Compare topology objects between Mininet" +\
2278 " and ONOS"
2279 topoResult = main.FALSE
2280 topoFailMsg = "ONOS topology don't match Mininet"
2281 elapsed = 0
2282 count = 0
2283 main.step( "Comparing ONOS topology to MN topology" )
2284 startTime = time.time()
2285 # Give time for Gossip to work
2286 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2287 devicesResults = main.TRUE
2288 linksResults = main.TRUE
2289 hostsResults = main.TRUE
2290 hostAttachmentResults = True
2291 count += 1
2292 cliStart = time.time()
2293 devices = []
2294 threads = []
2295 for i in main.activeNodes:
2296 t = main.Thread( target=utilities.retry,
2297 name="devices-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002298 args=[ main.CLIs[ i ].devices, [ None ] ],
2299 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002300 'randomTime': True } )
2301 threads.append( t )
2302 t.start()
2303
2304 for t in threads:
2305 t.join()
2306 devices.append( t.result )
2307 hosts = []
2308 ipResult = main.TRUE
2309 threads = []
2310 for i in main.activeNodes:
2311 t = main.Thread( target=utilities.retry,
2312 name="hosts-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002313 args=[ main.CLIs[ i ].hosts, [ None ] ],
2314 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002315 'randomTime': True } )
2316 threads.append( t )
2317 t.start()
2318
2319 for t in threads:
2320 t.join()
2321 try:
2322 hosts.append( json.loads( t.result ) )
2323 except ( ValueError, TypeError ):
2324 main.log.exception( "Error parsing hosts results" )
2325 main.log.error( repr( t.result ) )
2326 hosts.append( None )
2327 for controller in range( 0, len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002328 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002329 if hosts[ controller ]:
2330 for host in hosts[ controller ]:
2331 if host is None or host.get( 'ipAddresses', [] ) == []:
2332 main.log.error(
2333 "Error with host ipAddresses on controller" +
2334 controllerStr + ": " + str( host ) )
2335 ipResult = main.FALSE
2336 ports = []
2337 threads = []
2338 for i in main.activeNodes:
2339 t = main.Thread( target=utilities.retry,
2340 name="ports-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002341 args=[ main.CLIs[ i ].ports, [ None ] ],
2342 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002343 'randomTime': True } )
2344 threads.append( t )
2345 t.start()
2346
2347 for t in threads:
2348 t.join()
2349 ports.append( t.result )
2350 links = []
2351 threads = []
2352 for i in main.activeNodes:
2353 t = main.Thread( target=utilities.retry,
2354 name="links-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002355 args=[ main.CLIs[ i ].links, [ None ] ],
2356 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002357 'randomTime': True } )
2358 threads.append( t )
2359 t.start()
2360
2361 for t in threads:
2362 t.join()
2363 links.append( t.result )
2364 clusters = []
2365 threads = []
2366 for i in main.activeNodes:
2367 t = main.Thread( target=utilities.retry,
2368 name="clusters-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002369 args=[ main.CLIs[ i ].clusters, [ None ] ],
2370 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Hall69b2b982016-05-11 12:04:59 -07002371 'randomTime': True } )
2372 threads.append( t )
2373 t.start()
2374
2375 for t in threads:
2376 t.join()
2377 clusters.append( t.result )
2378
2379 elapsed = time.time() - startTime
2380 cliTime = time.time() - cliStart
2381 print "Elapsed time: " + str( elapsed )
2382 print "CLI time: " + str( cliTime )
2383
2384 if all( e is None for e in devices ) and\
2385 all( e is None for e in hosts ) and\
2386 all( e is None for e in ports ) and\
2387 all( e is None for e in links ) and\
2388 all( e is None for e in clusters ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002389 topoFailMsg = "Could not get topology from ONOS"
2390 main.log.error( topoFailMsg )
2391 continue # Try again, No use trying to compare
Jon Hall69b2b982016-05-11 12:04:59 -07002392
2393 mnSwitches = main.Mininet1.getSwitches()
2394 mnLinks = main.Mininet1.getLinks()
2395 mnHosts = main.Mininet1.getHosts()
2396 for controller in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002397 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002398 if devices[ controller ] and ports[ controller ] and\
Jon Hallf37d44d2017-05-24 10:37:30 -07002399 "Error" not in devices[ controller ] and\
2400 "Error" not in ports[ controller ]:
Jon Hall69b2b982016-05-11 12:04:59 -07002401
2402 try:
2403 currentDevicesResult = main.Mininet1.compareSwitches(
2404 mnSwitches,
2405 json.loads( devices[ controller ] ),
2406 json.loads( ports[ controller ] ) )
2407 except ( TypeError, ValueError ):
2408 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2409 devices[ controller ], ports[ controller ] ) )
2410 else:
2411 currentDevicesResult = main.FALSE
2412 utilities.assert_equals( expect=main.TRUE,
2413 actual=currentDevicesResult,
2414 onpass="ONOS" + controllerStr +
2415 " Switches view is correct",
2416 onfail="ONOS" + controllerStr +
2417 " Switches view is incorrect" )
2418
2419 if links[ controller ] and "Error" not in links[ controller ]:
2420 currentLinksResult = main.Mininet1.compareLinks(
2421 mnSwitches, mnLinks,
2422 json.loads( links[ controller ] ) )
2423 else:
2424 currentLinksResult = main.FALSE
2425 utilities.assert_equals( expect=main.TRUE,
2426 actual=currentLinksResult,
2427 onpass="ONOS" + controllerStr +
2428 " links view is correct",
2429 onfail="ONOS" + controllerStr +
2430 " links view is incorrect" )
2431 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2432 currentHostsResult = main.Mininet1.compareHosts(
2433 mnHosts,
2434 hosts[ controller ] )
2435 elif hosts[ controller ] == []:
2436 currentHostsResult = main.TRUE
2437 else:
2438 currentHostsResult = main.FALSE
2439 utilities.assert_equals( expect=main.TRUE,
2440 actual=currentHostsResult,
2441 onpass="ONOS" + controllerStr +
2442 " hosts exist in Mininet",
2443 onfail="ONOS" + controllerStr +
2444 " hosts don't match Mininet" )
2445 # CHECKING HOST ATTACHMENT POINTS
2446 hostAttachment = True
2447 zeroHosts = False
2448 # FIXME: topo-HA/obelisk specific mappings:
2449 # key is mac and value is dpid
2450 mappings = {}
2451 for i in range( 1, 29 ): # hosts 1 through 28
2452 # set up correct variables:
Jon Hallf37d44d2017-05-24 10:37:30 -07002453 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
Jon Hall69b2b982016-05-11 12:04:59 -07002454 if i == 1:
Jon Hallf37d44d2017-05-24 10:37:30 -07002455 deviceId = "1000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002456 elif i == 2:
Jon Hallf37d44d2017-05-24 10:37:30 -07002457 deviceId = "2000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002458 elif i == 3:
Jon Hallf37d44d2017-05-24 10:37:30 -07002459 deviceId = "3000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002460 elif i == 4:
Jon Hallf37d44d2017-05-24 10:37:30 -07002461 deviceId = "3004".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002462 elif i == 5:
Jon Hallf37d44d2017-05-24 10:37:30 -07002463 deviceId = "5000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002464 elif i == 6:
Jon Hallf37d44d2017-05-24 10:37:30 -07002465 deviceId = "6000".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002466 elif i == 7:
Jon Hallf37d44d2017-05-24 10:37:30 -07002467 deviceId = "6007".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002468 elif i >= 8 and i <= 17:
2469 dpid = '3' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002470 deviceId = dpid.zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002471 elif i >= 18 and i <= 27:
2472 dpid = '6' + str( i ).zfill( 3 )
Jon Hallf37d44d2017-05-24 10:37:30 -07002473 deviceId = dpid.zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002474 elif i == 28:
Jon Hallf37d44d2017-05-24 10:37:30 -07002475 deviceId = "2800".zfill( 16 )
Jon Hall69b2b982016-05-11 12:04:59 -07002476 mappings[ macId ] = deviceId
2477 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2478 if hosts[ controller ] == []:
2479 main.log.warn( "There are no hosts discovered" )
2480 zeroHosts = True
2481 else:
2482 for host in hosts[ controller ]:
2483 mac = None
2484 location = None
2485 device = None
2486 port = None
2487 try:
2488 mac = host.get( 'mac' )
2489 assert mac, "mac field could not be found for this host object"
2490
2491 location = host.get( 'location' )
2492 assert location, "location field could not be found for this host object"
2493
2494 # Trim the protocol identifier off deviceId
Jon Hallf37d44d2017-05-24 10:37:30 -07002495 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
Jon Hall69b2b982016-05-11 12:04:59 -07002496 assert device, "elementId field could not be found for this host location object"
2497
2498 port = location.get( 'port' )
2499 assert port, "port field could not be found for this host location object"
2500
2501 # Now check if this matches where they should be
2502 if mac and device and port:
2503 if str( port ) != "1":
2504 main.log.error( "The attachment port is incorrect for " +
2505 "host " + str( mac ) +
Jon Hallf37d44d2017-05-24 10:37:30 -07002506 ". Expected: 1 Actual: " + str( port ) )
Jon Hall69b2b982016-05-11 12:04:59 -07002507 hostAttachment = False
2508 if device != mappings[ str( mac ) ]:
2509 main.log.error( "The attachment device is incorrect for " +
2510 "host " + str( mac ) +
2511 ". Expected: " + mappings[ str( mac ) ] +
2512 " Actual: " + device )
2513 hostAttachment = False
2514 else:
2515 hostAttachment = False
2516 except AssertionError:
2517 main.log.exception( "Json object not as expected" )
2518 main.log.error( repr( host ) )
2519 hostAttachment = False
2520 else:
2521 main.log.error( "No hosts json output or \"Error\"" +
2522 " in output. hosts = " +
2523 repr( hosts[ controller ] ) )
2524 if zeroHosts is False:
2525 # TODO: Find a way to know if there should be hosts in a
2526 # given point of the test
2527 hostAttachment = True
2528
2529 # END CHECKING HOST ATTACHMENT POINTS
2530 devicesResults = devicesResults and currentDevicesResult
2531 linksResults = linksResults and currentLinksResult
2532 hostsResults = hostsResults and currentHostsResult
2533 hostAttachmentResults = hostAttachmentResults and\
2534 hostAttachment
2535 topoResult = ( devicesResults and linksResults
2536 and hostsResults and ipResult and
2537 hostAttachmentResults )
2538 utilities.assert_equals( expect=True,
2539 actual=topoResult,
2540 onpass="ONOS topology matches Mininet",
2541 onfail=topoFailMsg )
2542 # End of While loop to pull ONOS state
2543
2544 # Compare json objects for hosts and dataplane clusters
2545
2546 # hosts
2547 main.step( "Hosts view is consistent across all ONOS nodes" )
2548 consistentHostsResult = main.TRUE
2549 for controller in range( len( hosts ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002550 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002551 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2552 if hosts[ controller ] == hosts[ 0 ]:
2553 continue
2554 else: # hosts not consistent
2555 main.log.error( "hosts from ONOS" + controllerStr +
2556 " is inconsistent with ONOS1" )
2557 main.log.warn( repr( hosts[ controller ] ) )
2558 consistentHostsResult = main.FALSE
2559
2560 else:
2561 main.log.error( "Error in getting ONOS hosts from ONOS" +
2562 controllerStr )
2563 consistentHostsResult = main.FALSE
2564 main.log.warn( "ONOS" + controllerStr +
2565 " hosts response: " +
2566 repr( hosts[ controller ] ) )
2567 utilities.assert_equals(
2568 expect=main.TRUE,
2569 actual=consistentHostsResult,
2570 onpass="Hosts view is consistent across all ONOS nodes",
2571 onfail="ONOS nodes have different views of hosts" )
2572
2573 main.step( "Hosts information is correct" )
2574 hostsResults = hostsResults and ipResult
2575 utilities.assert_equals(
2576 expect=main.TRUE,
2577 actual=hostsResults,
2578 onpass="Host information is correct",
2579 onfail="Host information is incorrect" )
2580
2581 main.step( "Host attachment points to the network" )
2582 utilities.assert_equals(
2583 expect=True,
2584 actual=hostAttachmentResults,
2585 onpass="Hosts are correctly attached to the network",
2586 onfail="ONOS did not correctly attach hosts to the network" )
2587
2588 # Strongly connected clusters of devices
2589 main.step( "Clusters view is consistent across all ONOS nodes" )
2590 consistentClustersResult = main.TRUE
2591 for controller in range( len( clusters ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002592 controllerStr = str( main.activeNodes[ controller ] + 1 )
Jon Hall69b2b982016-05-11 12:04:59 -07002593 if "Error" not in clusters[ controller ]:
2594 if clusters[ controller ] == clusters[ 0 ]:
2595 continue
2596 else: # clusters not consistent
2597 main.log.error( "clusters from ONOS" +
2598 controllerStr +
2599 " is inconsistent with ONOS1" )
2600 consistentClustersResult = main.FALSE
2601 else:
2602 main.log.error( "Error in getting dataplane clusters " +
2603 "from ONOS" + controllerStr )
2604 consistentClustersResult = main.FALSE
2605 main.log.warn( "ONOS" + controllerStr +
2606 " clusters response: " +
2607 repr( clusters[ controller ] ) )
2608 utilities.assert_equals(
2609 expect=main.TRUE,
2610 actual=consistentClustersResult,
2611 onpass="Clusters view is consistent across all ONOS nodes",
2612 onfail="ONOS nodes have different views of clusters" )
2613 if not consistentClustersResult:
2614 main.log.debug( clusters )
2615 for x in links:
2616 main.log.warn( "{}: {}".format( len( x ), x ) )
2617
Jon Hall69b2b982016-05-11 12:04:59 -07002618 main.step( "There is only one SCC" )
2619 # there should always only be one cluster
2620 try:
2621 numClusters = len( json.loads( clusters[ 0 ] ) )
2622 except ( ValueError, TypeError ):
2623 main.log.exception( "Error parsing clusters[0]: " +
Jon Hallf37d44d2017-05-24 10:37:30 -07002624 repr( clusters[ 0 ] ) )
Jon Hall69b2b982016-05-11 12:04:59 -07002625 numClusters = "ERROR"
2626 clusterResults = main.FALSE
2627 if numClusters == 1:
2628 clusterResults = main.TRUE
2629 utilities.assert_equals(
2630 expect=1,
2631 actual=numClusters,
2632 onpass="ONOS shows 1 SCC",
2633 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2634
2635 topoResult = ( devicesResults and linksResults
2636 and hostsResults and consistentHostsResult
2637 and consistentClustersResult and clusterResults
2638 and ipResult and hostAttachmentResults )
2639
2640 topoResult = topoResult and int( count <= 2 )
2641 note = "note it takes about " + str( int( cliTime ) ) + \
2642 " seconds for the test to make all the cli calls to fetch " +\
2643 "the topology from each ONOS instance"
2644 main.log.info(
2645 "Very crass estimate for topology discovery/convergence( " +
2646 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2647 str( count ) + " tries" )
2648
2649 main.step( "Device information is correct" )
2650 utilities.assert_equals(
2651 expect=main.TRUE,
2652 actual=devicesResults,
2653 onpass="Device information is correct",
2654 onfail="Device information is incorrect" )
2655
2656 main.step( "Links are correct" )
2657 utilities.assert_equals(
2658 expect=main.TRUE,
2659 actual=linksResults,
2660 onpass="Link are correct",
2661 onfail="Links are incorrect" )
2662
2663 main.step( "Hosts are correct" )
2664 utilities.assert_equals(
2665 expect=main.TRUE,
2666 actual=hostsResults,
2667 onpass="Hosts are correct",
2668 onfail="Hosts are incorrect" )
2669
2670 # FIXME: move this to an ONOS state case
2671 main.step( "Checking ONOS nodes" )
2672 nodeResults = utilities.retry( main.HA.nodesCheck,
2673 False,
Jon Hallf37d44d2017-05-24 10:37:30 -07002674 args=[ main.activeNodes ],
Jon Hall69b2b982016-05-11 12:04:59 -07002675 attempts=5 )
2676 utilities.assert_equals( expect=True, actual=nodeResults,
2677 onpass="Nodes check successful",
2678 onfail="Nodes check NOT successful" )
2679 if not nodeResults:
2680 for i in main.activeNodes:
2681 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallf37d44d2017-05-24 10:37:30 -07002682 main.CLIs[ i ].name,
2683 main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall69b2b982016-05-11 12:04:59 -07002684
Jon Halld2871c22016-07-26 11:01:14 -07002685 if not topoResult:
2686 main.cleanup()
2687 main.exit()
2688
Jon Hall69b2b982016-05-11 12:04:59 -07002689 def CASE9( self, main ):
2690 """
2691 Link s3-s28 down
2692 """
2693 import time
2694 assert main.numCtrls, "main.numCtrls not defined"
2695 assert main, "main not defined"
2696 assert utilities.assert_equals, "utilities.assert_equals not defined"
2697 assert main.CLIs, "main.CLIs not defined"
2698 assert main.nodes, "main.nodes not defined"
2699 # NOTE: You should probably run a topology check after this
2700
2701 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2702
2703 description = "Turn off a link to ensure that Link Discovery " +\
2704 "is working properly"
2705 main.case( description )
2706
2707 main.step( "Kill Link between s3 and s28" )
2708 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2709 main.log.info( "Waiting " + str( linkSleep ) +
2710 " seconds for link down to be discovered" )
2711 time.sleep( linkSleep )
2712 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2713 onpass="Link down successful",
2714 onfail="Failed to bring link down" )
2715 # TODO do some sort of check here
2716
2717 def CASE10( self, main ):
2718 """
2719 Link s3-s28 up
2720 """
2721 import time
2722 assert main.numCtrls, "main.numCtrls not defined"
2723 assert main, "main not defined"
2724 assert utilities.assert_equals, "utilities.assert_equals not defined"
2725 assert main.CLIs, "main.CLIs not defined"
2726 assert main.nodes, "main.nodes not defined"
2727 # NOTE: You should probably run a topology check after this
2728
2729 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2730
2731 description = "Restore a link to ensure that Link Discovery is " + \
2732 "working properly"
2733 main.case( description )
2734
2735 main.step( "Bring link between s3 and s28 back up" )
2736 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2737 main.log.info( "Waiting " + str( linkSleep ) +
2738 " seconds for link up to be discovered" )
2739 time.sleep( linkSleep )
2740 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2741 onpass="Link up successful",
2742 onfail="Failed to bring link up" )
2743 # TODO do some sort of check here
2744
2745 def CASE11( self, main ):
2746 """
2747 Switch Down
2748 """
2749 # NOTE: You should probably run a topology check after this
2750 import time
2751 assert main.numCtrls, "main.numCtrls not defined"
2752 assert main, "main not defined"
2753 assert utilities.assert_equals, "utilities.assert_equals not defined"
2754 assert main.CLIs, "main.CLIs not defined"
2755 assert main.nodes, "main.nodes not defined"
2756
2757 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2758
2759 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallf37d44d2017-05-24 10:37:30 -07002760 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -07002761 main.case( description )
2762 switch = main.params[ 'kill' ][ 'switch' ]
2763 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2764
2765 # TODO: Make this switch parameterizable
2766 main.step( "Kill " + switch )
2767 main.log.info( "Deleting " + switch )
2768 main.Mininet1.delSwitch( switch )
2769 main.log.info( "Waiting " + str( switchSleep ) +
2770 " seconds for switch down to be discovered" )
2771 time.sleep( switchSleep )
2772 device = onosCli.getDevice( dpid=switchDPID )
2773 # Peek at the deleted switch
2774 main.log.warn( str( device ) )
2775 result = main.FALSE
2776 if device and device[ 'available' ] is False:
2777 result = main.TRUE
2778 utilities.assert_equals( expect=main.TRUE, actual=result,
2779 onpass="Kill switch successful",
2780 onfail="Failed to kill switch?" )
2781
2782 def CASE12( self, main ):
2783 """
2784 Switch Up
2785 """
2786 # NOTE: You should probably run a topology check after this
2787 import time
2788 assert main.numCtrls, "main.numCtrls not defined"
2789 assert main, "main not defined"
2790 assert utilities.assert_equals, "utilities.assert_equals not defined"
2791 assert main.CLIs, "main.CLIs not defined"
2792 assert main.nodes, "main.nodes not defined"
2793
2794 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2795 switch = main.params[ 'kill' ][ 'switch' ]
2796 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2797 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallf37d44d2017-05-24 10:37:30 -07002798 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -07002799 description = "Adding a switch to ensure it is discovered correctly"
2800 main.case( description )
2801
2802 main.step( "Add back " + switch )
2803 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2804 for peer in links:
2805 main.Mininet1.addLink( switch, peer )
2806 ipList = [ node.ip_address for node in main.nodes ]
2807 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2808 main.log.info( "Waiting " + str( switchSleep ) +
2809 " seconds for switch up to be discovered" )
2810 time.sleep( switchSleep )
2811 device = onosCli.getDevice( dpid=switchDPID )
2812 # Peek at the deleted switch
2813 main.log.warn( str( device ) )
2814 result = main.FALSE
2815 if device and device[ 'available' ]:
2816 result = main.TRUE
2817 utilities.assert_equals( expect=main.TRUE, actual=result,
2818 onpass="add switch successful",
2819 onfail="Failed to add switch?" )
2820
2821 def CASE13( self, main ):
2822 """
2823 Clean up
2824 """
2825 assert main.numCtrls, "main.numCtrls not defined"
2826 assert main, "main not defined"
2827 assert utilities.assert_equals, "utilities.assert_equals not defined"
2828 assert main.CLIs, "main.CLIs not defined"
2829 assert main.nodes, "main.nodes not defined"
2830
2831 main.case( "Test Cleanup" )
2832 main.step( "Killing tcpdumps" )
2833 main.Mininet2.stopTcpdump()
2834
2835 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2836 main.step( "Copying MN pcap and ONOS log files to test station" )
2837 # NOTE: MN Pcap file is being saved to logdir.
2838 # We scp this file as MN and TestON aren't necessarily the same vm
2839
2840 # FIXME: To be replaced with a Jenkin's post script
2841 # TODO: Load these from params
2842 # NOTE: must end in /
2843 logFolder = "/opt/onos/log/"
2844 logFiles = [ "karaf.log", "karaf.log.1" ]
2845 # NOTE: must end in /
2846 for f in logFiles:
2847 for node in main.nodes:
2848 dstName = main.logdir + "/" + node.name + "-" + f
2849 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2850 logFolder + f, dstName )
2851 # std*.log's
2852 # NOTE: must end in /
2853 logFolder = "/opt/onos/var/"
2854 logFiles = [ "stderr.log", "stdout.log" ]
2855 # NOTE: must end in /
2856 for f in logFiles:
2857 for node in main.nodes:
2858 dstName = main.logdir + "/" + node.name + "-" + f
2859 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2860 logFolder + f, dstName )
2861 else:
2862 main.log.debug( "skipping saving log files" )
2863
2864 main.step( "Stopping Mininet" )
2865 mnResult = main.Mininet1.stopNet()
2866 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2867 onpass="Mininet stopped",
2868 onfail="MN cleanup NOT successful" )
2869
2870 main.step( "Checking ONOS Logs for errors" )
2871 for node in main.nodes:
2872 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2873 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2874
2875 try:
Jon Hallf37d44d2017-05-24 10:37:30 -07002876 timerLog = open( main.logdir + "/Timers.csv", 'w' )
Jon Hall69b2b982016-05-11 12:04:59 -07002877 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2878 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2879 timerLog.close()
Jon Hallf37d44d2017-05-24 10:37:30 -07002880 except NameError as e:
2881 main.log.exception( e )
Jon Hall69b2b982016-05-11 12:04:59 -07002882
2883 main.step( "Stopping webserver" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002884 status = main.Server.stop()
Jon Hall69b2b982016-05-11 12:04:59 -07002885 utilities.assert_equals( expect=main.TRUE, actual=status,
2886 onpass="Stop Server",
2887 onfail="Failled to stop SimpleHTTPServer" )
2888 del main.Server
2889
2890 def CASE14( self, main ):
2891 """
2892 start election app on all onos nodes
2893 """
2894 import time
2895 assert main.numCtrls, "main.numCtrls not defined"
2896 assert main, "main not defined"
2897 assert utilities.assert_equals, "utilities.assert_equals not defined"
2898 assert main.CLIs, "main.CLIs not defined"
2899 assert main.nodes, "main.nodes not defined"
2900
Jon Hallf37d44d2017-05-24 10:37:30 -07002901 main.case( "Start Leadership Election app" )
Jon Hall69b2b982016-05-11 12:04:59 -07002902 main.step( "Install leadership election app" )
Jon Hallf37d44d2017-05-24 10:37:30 -07002903 onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
Jon Hall69b2b982016-05-11 12:04:59 -07002904 appResult = onosCli.activateApp( "org.onosproject.election" )
2905 utilities.assert_equals(
2906 expect=main.TRUE,
2907 actual=appResult,
2908 onpass="Election app installed",
2909 onfail="Something went wrong with installing Leadership election" )
2910
2911 main.step( "Run for election on each node" )
2912 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002913 main.CLIs[ i ].electionTestRun()
2914 time.sleep( 5 )
2915 activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
Jon Hall69b2b982016-05-11 12:04:59 -07002916 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2917 utilities.assert_equals(
2918 expect=True,
2919 actual=sameResult,
2920 onpass="All nodes see the same leaderboards",
2921 onfail="Inconsistent leaderboards" )
2922
2923 if sameResult:
2924 leader = leaders[ 0 ][ 0 ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002925 if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
Jon Hall69b2b982016-05-11 12:04:59 -07002926 correctLeader = True
2927 else:
2928 correctLeader = False
2929 main.step( "First node was elected leader" )
2930 utilities.assert_equals(
2931 expect=True,
2932 actual=correctLeader,
2933 onpass="Correct leader was elected",
2934 onfail="Incorrect leader" )
2935
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        # Sanity checks that the test environment is fully initialized
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before
        newLeaders = []  # list of lists of each node's candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app loaded the rest of the case is moot
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All nodes agree; the first entry of any board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break hit, leader's CLI not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported no leader; only acceptable when
            # there was a single controller whose leader just withdrew
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates recorded to know who should have won
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: Parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3109
3110 def CASE16( self, main ):
3111 """
3112 Install Distributed Primitives app
3113 """
3114 import time
3115 assert main.numCtrls, "main.numCtrls not defined"
3116 assert main, "main not defined"
3117 assert utilities.assert_equals, "utilities.assert_equals not defined"
3118 assert main.CLIs, "main.CLIs not defined"
3119 assert main.nodes, "main.nodes not defined"
3120
3121 # Variables for the distributed primitives tests
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003122 main.pCounterName = "TestON-Partitions"
3123 main.pCounterValue = 0
Jon Hallf37d44d2017-05-24 10:37:30 -07003124 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003125 main.onosSetName = "TestON-set"
Jon Hall69b2b982016-05-11 12:04:59 -07003126
3127 description = "Install Primitives app"
3128 main.case( description )
3129 main.step( "Install Primitives app" )
3130 appName = "org.onosproject.distributedprimitives"
Jon Hallf37d44d2017-05-24 10:37:30 -07003131 node = main.activeNodes[ 0 ]
3132 appResults = main.CLIs[ node ].activateApp( appName )
Jon Hall69b2b982016-05-11 12:04:59 -07003133 utilities.assert_equals( expect=main.TRUE,
3134 actual=appResults,
3135 onpass="Primitives app activated",
3136 onfail="Primitives app not activated" )
3137 time.sleep( 5 ) # To allow all nodes to activate
3138
    def CASE17( self, main ):
        """
        Check for basic functionality with distributed primitives
        """
        # Delegates to the shared HA helper implementation of this case
        main.HA.CASE17( main )