"""
Description: This test is to determine if the HA test setup is
             working correctly. There are no failures so this test should
             have a 100% pass rate

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The Failure case. Since this is the Sanity test, we do nothing.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAsanity:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import time
        import json
        main.log.info( "ONOS HA Sanity test - initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # TODO: refactor how to get onos port, maybe put into component tag?
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAsanity"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

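        # NOTE: A hedged sketch, not part of the original test: instead of the
        #       fixed 10 second sleep above, the app activation check could be
        #       polled with utilities.retry, mirroring how nodesCheck is
        #       retried earlier in this case. The helper name allAppsActive is
        #       hypothetical.
        #
        #     def allAppsActive():
        #         return all( main.CLIs[ 0 ].appStatus( app ) == "ACTIVE"
        #                     for app in apps )
        #     activateResult = utilities.retry( allAppsActive, False,
        #                                       attempts=5 )
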
        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
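            # Worked example of the MAC construction above: for i == 8,
            # hex( 8 )[ 2: ] is '8', zfill( 2 ) gives '08', so host1 is
            # "00:00:00:00:00:08" (h8) and host2 is "00:00:00:00:00:12"
            # (h18, since hex( 18 ) is '0x12').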
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True
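        # Illustrative arithmetic for the bound above (an assumption for the
        # sake of example: the params file sets timers.gossip to 5 seconds):
        # with 7 active nodes, maxGossipTime = 5 * 7 = 35, so gossipTime must
        # be at most 35 seconds for the anti-entropy check to pass.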

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
1146 # TODO check for consistency among nodes
1147 else:
1148 main.log.error( "partitions() returned None" )
1149 except ( ValueError, TypeError ):
1150 main.log.exception( "Error parsing partitions" )
1151 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -07001152 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001153 try:
1154 if pendingMap :
1155 parsedPending = json.loads( pendingMap )
1156 main.log.warn( json.dumps( parsedPending,
1157 sort_keys=True,
1158 indent=4,
1159 separators=( ',', ': ' ) ) )
1160 # TODO check something here?
1161 else:
1162 main.log.error( "pendingMap() returned None" )
1163 except ( ValueError, TypeError ):
1164 main.log.exception( "Error parsing pending map" )
1165 main.log.error( repr( pendingMap ) )
1166 # Print flowrules
Jon Halla440e872016-03-31 15:15:50 -07001167 node = main.activeNodes[0]
1168 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001169 main.step( "Wait a minute then ping again" )
1170 # the wait is above
1171 PingResult = main.TRUE
1172 for i in range( 8, 18 ):
1173 ping = main.Mininet1.pingHost( src="h" + str( i ),
1174 target="h" + str( i + 10 ) )
1175 PingResult = PingResult and ping
1176 if ping == main.FALSE:
1177 main.log.warn( "Ping failed between h" + str( i ) +
1178 " and h" + str( i + 10 ) )
1179 elif ping == main.TRUE:
1180 main.log.info( "Ping test passed!" )
1181 # Don't set PingResult or you'd override failures
1182 if PingResult == main.FALSE:
1183 main.log.error(
1184 "Intents have not been installed correctly, pings failed." )
1185 # TODO: pretty print
1186 main.log.warn( "ONOS1 intents: " )
1187 try:
Jon Halla440e872016-03-31 15:15:50 -07001188 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001189 main.log.warn( json.dumps( json.loads( tmpIntents ),
1190 sort_keys=True,
1191 indent=4,
1192 separators=( ',', ': ' ) ) )
1193 except ( ValueError, TypeError ):
1194 main.log.warn( repr( tmpIntents ) )
1195 utilities.assert_equals(
1196 expect=main.TRUE,
1197 actual=PingResult,
1198 onpass="Intents have been installed correctly and pings work",
1199 onfail="Intents have not been installed correctly, pings failed." )
1200
1201 def CASE5( self, main ):
1202 """
1203 Reading state of ONOS
1204 """
1205 import json
1206 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001207 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001208 assert main, "main not defined"
1209 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001210 assert main.CLIs, "main.CLIs not defined"
1211 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001212
1213 main.case( "Setting up and gathering data for current state" )
1214 # The general idea for this test case is to pull the state of
1215 # ( intents,flows, topology,... ) from each ONOS node
1216 # We can then compare them with each other and also with past states
1217
1218 main.step( "Check that each switch has a master" )
1219 global mastershipState
1220 mastershipState = '[]'
1221
1222 # Assert that each device has a master
1223 rolesNotNull = main.TRUE
1224 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001225 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001226 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001227 name="rolesNotNull-" + str( i ),
1228 args=[] )
1229 threads.append( t )
1230 t.start()
1231
1232 for t in threads:
1233 t.join()
1234 rolesNotNull = rolesNotNull and t.result
1235 utilities.assert_equals(
1236 expect=main.TRUE,
1237 actual=rolesNotNull,
1238 onpass="Each device has a master",
1239 onfail="Some devices don't have a master assigned" )
1240
1241 main.step( "Get the Mastership of each switch from each controller" )
1242 ONOSMastership = []
1243 mastershipCheck = main.FALSE
1244 consistentMastership = True
1245 rolesResults = True
1246 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001247 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001248 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001249 name="roles-" + str( i ),
1250 args=[] )
1251 threads.append( t )
1252 t.start()
1253
1254 for t in threads:
1255 t.join()
1256 ONOSMastership.append( t.result )
1257
Jon Halla440e872016-03-31 15:15:50 -07001258 for i in range( len( ONOSMastership ) ):
1259 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001260 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001261 main.log.error( "Error in getting ONOS" + node + " roles" )
1262 main.log.warn( "ONOS" + node + " mastership response: " +
1263 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001264 rolesResults = False
1265 utilities.assert_equals(
1266 expect=True,
1267 actual=rolesResults,
1268 onpass="No error in reading roles output",
1269 onfail="Error in reading roles from ONOS" )
1270
1271 main.step( "Check for consistency in roles from each controller" )
1272 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1273 main.log.info(
1274 "Switch roles are consistent across all ONOS nodes" )
1275 else:
1276 consistentMastership = False
1277 utilities.assert_equals(
1278 expect=True,
1279 actual=consistentMastership,
1280 onpass="Switch roles are consistent across all ONOS nodes",
1281 onfail="ONOS nodes have different views of switch roles" )
1282
1283 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001284 for i in range( len( main.activeNodes ) ):
1285 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001286 try:
1287 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001288 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 json.dumps(
1290 json.loads( ONOSMastership[ i ] ),
1291 sort_keys=True,
1292 indent=4,
1293 separators=( ',', ': ' ) ) )
1294 except ( ValueError, TypeError ):
1295 main.log.warn( repr( ONOSMastership[ i ] ) )
1296 elif rolesResults and consistentMastership:
1297 mastershipCheck = main.TRUE
1298 mastershipState = ONOSMastership[ 0 ]
1299
1300 main.step( "Get the intents from each controller" )
1301 global intentState
1302 intentState = []
1303 ONOSIntents = []
1304 intentCheck = main.FALSE
1305 consistentIntents = True
1306 intentsResults = True
1307 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001308 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001309 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001310 name="intents-" + str( i ),
1311 args=[],
1312 kwargs={ 'jsonFormat': True } )
1313 threads.append( t )
1314 t.start()
1315
1316 for t in threads:
1317 t.join()
1318 ONOSIntents.append( t.result )
1319
Jon Halla440e872016-03-31 15:15:50 -07001320 for i in range( len( ONOSIntents ) ):
1321 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001322 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001323 main.log.error( "Error in getting ONOS" + node + " intents" )
1324 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001325 repr( ONOSIntents[ i ] ) )
1326 intentsResults = False
1327 utilities.assert_equals(
1328 expect=True,
1329 actual=intentsResults,
1330 onpass="No error in reading intents output",
1331 onfail="Error in reading intents from ONOS" )
1332
1333 main.step( "Check for consistency in Intents from each controller" )
1334 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1335 main.log.info( "Intents are consistent across all ONOS " +
1336 "nodes" )
1337 else:
1338 consistentIntents = False
1339 main.log.error( "Intents not consistent" )
1340 utilities.assert_equals(
1341 expect=True,
1342 actual=consistentIntents,
1343 onpass="Intents are consistent across all ONOS nodes",
1344 onfail="ONOS nodes have different views of intents" )
1345
1346 if intentsResults:
1347 # Try to make it easy to figure out what is happening
1348 #
1349 # Intent ONOS1 ONOS2 ...
1350 # 0x01 INSTALLED INSTALLING
1351 # ... ... ...
1352 # ... ... ...
1353 title = " Id"
Jon Halla440e872016-03-31 15:15:50 -07001354 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001355 title += " " * 10 + "ONOS" + str( n + 1 )
1356 main.log.warn( title )
Jon Halle1a3b752015-07-22 13:02:46 -07001357 # get all intent keys in the cluster
Jon Hall5cf14d52015-07-16 12:15:19 -07001358 keys = []
1359 try:
1360 # Get the set of all intent keys
1361 for nodeStr in ONOSIntents:
1362 node = json.loads( nodeStr )
1363 for intent in node:
1364 keys.append( intent.get( 'id' ) )
1365 keys = set( keys )
1366 # For each intent key, print the state on each node
1367 for key in keys:
1368 row = "%-13s" % key
1369 for nodeStr in ONOSIntents:
1370 node = json.loads( nodeStr )
1371 for intent in node:
1372 if intent.get( 'id', "Error" ) == key:
1373 row += "%-15s" % intent.get( 'state' )
1374 main.log.warn( row )
1375 # End of intent state table
1376 except ValueError as e:
1377 main.log.exception( e )
1378 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1379
1380 if intentsResults and not consistentIntents:
1381 # print the json objects
Jon Halla440e872016-03-31 15:15:50 -07001382 n = str( main.activeNodes[-1] + 1 )
1383 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001384 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1385 sort_keys=True,
1386 indent=4,
1387 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001388 for i in range( len( ONOSIntents ) ):
1389 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001390 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07001391 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001392 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1393 sort_keys=True,
1394 indent=4,
1395 separators=( ',', ': ' ) ) )
1396 else:
Jon Halla440e872016-03-31 15:15:50 -07001397 main.log.debug( "ONOS" + node + " intents match ONOS" +
1398 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001399 elif intentsResults and consistentIntents:
1400 intentCheck = main.TRUE
1401 intentState = ONOSIntents[ 0 ]
1402
1403 main.step( "Get the flows from each controller" )
1404 global flowState
1405 flowState = []
1406 ONOSFlows = []
1407 ONOSFlowsJson = []
1408 flowCheck = main.FALSE
1409 consistentFlows = True
1410 flowsResults = True
1411 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001412 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001413 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001414 name="flows-" + str( i ),
1415 args=[],
1416 kwargs={ 'jsonFormat': True } )
1417 threads.append( t )
1418 t.start()
1419
1420 # NOTE: Flows command can take some time to run
1421 time.sleep(30)
1422 for t in threads:
1423 t.join()
1424 result = t.result
1425 ONOSFlows.append( result )
1426
Jon Halla440e872016-03-31 15:15:50 -07001427 for i in range( len( ONOSFlows ) ):
1428 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001429 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1430 main.log.error( "Error in getting ONOS" + num + " flows" )
1431 main.log.warn( "ONOS" + num + " flows response: " +
1432 repr( ONOSFlows[ i ] ) )
1433 flowsResults = False
1434 ONOSFlowsJson.append( None )
1435 else:
1436 try:
1437 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1438 except ( ValueError, TypeError ):
1439 # FIXME: change this to log.error?
1440 main.log.exception( "Error in parsing ONOS" + num +
1441 " response as json." )
1442 main.log.error( repr( ONOSFlows[ i ] ) )
1443 ONOSFlowsJson.append( None )
1444 flowsResults = False
1445 utilities.assert_equals(
1446 expect=True,
1447 actual=flowsResults,
1448 onpass="No error in reading flows output",
1449 onfail="Error in reading flows from ONOS" )
1450
1451 main.step( "Check for consistency in Flows from each controller" )
1452 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1453 if all( tmp ):
1454 main.log.info( "Flow count is consistent across all ONOS nodes" )
1455 else:
1456 consistentFlows = False
1457 utilities.assert_equals(
1458 expect=True,
1459 actual=consistentFlows,
1460 onpass="The flow count is consistent across all ONOS nodes",
1461 onfail="ONOS nodes have different flow counts" )
1462
1463 if flowsResults and not consistentFlows:
Jon Halla440e872016-03-31 15:15:50 -07001464 for i in range( len( ONOSFlows ) ):
1465 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001466 try:
1467 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001468 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001469 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1470 indent=4, separators=( ',', ': ' ) ) )
1471 except ( ValueError, TypeError ):
Jon Halla440e872016-03-31 15:15:50 -07001472 main.log.warn( "ONOS" + node + " flows: " +
1473 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001474 elif flowsResults and consistentFlows:
1475 flowCheck = main.TRUE
1476 flowState = ONOSFlows[ 0 ]
1477
1478 main.step( "Get the OF Table entries" )
1479 global flows
1480 flows = []
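        # The test topology has 28 switches (s1-s28), so dump the OpenFlow table
        # of each one for later comparison in CASE7.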
1481 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001482 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001483 if flowCheck == main.FALSE:
1484 for table in flows:
1485 main.log.warn( table )
1486 # TODO: Compare switch flow tables with ONOS flow tables
1487
1488 main.step( "Start continuous pings" )
1489 main.Mininet2.pingLong(
1490 src=main.params[ 'PING' ][ 'source1' ],
1491 target=main.params[ 'PING' ][ 'target1' ],
1492 pingTime=500 )
1493 main.Mininet2.pingLong(
1494 src=main.params[ 'PING' ][ 'source2' ],
1495 target=main.params[ 'PING' ][ 'target2' ],
1496 pingTime=500 )
1497 main.Mininet2.pingLong(
1498 src=main.params[ 'PING' ][ 'source3' ],
1499 target=main.params[ 'PING' ][ 'target3' ],
1500 pingTime=500 )
1501 main.Mininet2.pingLong(
1502 src=main.params[ 'PING' ][ 'source4' ],
1503 target=main.params[ 'PING' ][ 'target4' ],
1504 pingTime=500 )
1505 main.Mininet2.pingLong(
1506 src=main.params[ 'PING' ][ 'source5' ],
1507 target=main.params[ 'PING' ][ 'target5' ],
1508 pingTime=500 )
1509 main.Mininet2.pingLong(
1510 src=main.params[ 'PING' ][ 'source6' ],
1511 target=main.params[ 'PING' ][ 'target6' ],
1512 pingTime=500 )
1513 main.Mininet2.pingLong(
1514 src=main.params[ 'PING' ][ 'source7' ],
1515 target=main.params[ 'PING' ][ 'target7' ],
1516 pingTime=500 )
1517 main.Mininet2.pingLong(
1518 src=main.params[ 'PING' ][ 'source8' ],
1519 target=main.params[ 'PING' ][ 'target8' ],
1520 pingTime=500 )
1521 main.Mininet2.pingLong(
1522 src=main.params[ 'PING' ][ 'source9' ],
1523 target=main.params[ 'PING' ][ 'target9' ],
1524 pingTime=500 )
1525 main.Mininet2.pingLong(
1526 src=main.params[ 'PING' ][ 'source10' ],
1527 target=main.params[ 'PING' ][ 'target10' ],
1528 pingTime=500 )
1529
1530 main.step( "Collecting topology information from ONOS" )
1531 devices = []
1532 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001533 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001534 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001535 name="devices-" + str( i ),
1536 args=[ ] )
1537 threads.append( t )
1538 t.start()
1539
1540 for t in threads:
1541 t.join()
1542 devices.append( t.result )
1543 hosts = []
1544 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001545 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001546 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001547 name="hosts-" + str( i ),
1548 args=[ ] )
1549 threads.append( t )
1550 t.start()
1551
1552 for t in threads:
1553 t.join()
1554 try:
1555 hosts.append( json.loads( t.result ) )
1556 except ( ValueError, TypeError ):
1557 # FIXME: better handling of this, print which node
1558 # Maybe use thread name?
1559 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001560 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001561 hosts.append( None )
1562
1563 ports = []
1564 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001565 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001566 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001567 name="ports-" + str( i ),
1568 args=[ ] )
1569 threads.append( t )
1570 t.start()
1571
1572 for t in threads:
1573 t.join()
1574 ports.append( t.result )
1575 links = []
1576 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001577 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001578 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001579 name="links-" + str( i ),
1580 args=[ ] )
1581 threads.append( t )
1582 t.start()
1583
1584 for t in threads:
1585 t.join()
1586 links.append( t.result )
1587 clusters = []
1588 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001589 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001590 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001591 name="clusters-" + str( i ),
1592 args=[ ] )
1593 threads.append( t )
1594 t.start()
1595
1596 for t in threads:
1597 t.join()
1598 clusters.append( t.result )
1599 # Compare json objects for hosts and dataplane clusters
1600
1601 # hosts
1602 main.step( "Host view is consistent across ONOS nodes" )
1603 consistentHostsResult = main.TRUE
1604 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001605 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001606 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001607 if hosts[ controller ] == hosts[ 0 ]:
1608 continue
1609 else: # hosts not consistent
1610 main.log.error( "hosts from ONOS" +
1611 controllerStr +
1612 " is inconsistent with ONOS1" )
1613 main.log.warn( repr( hosts[ controller ] ) )
1614 consistentHostsResult = main.FALSE
1615
1616 else:
1617 main.log.error( "Error in getting ONOS hosts from ONOS" +
1618 controllerStr )
1619 consistentHostsResult = main.FALSE
1620 main.log.warn( "ONOS" + controllerStr +
1621 " hosts response: " +
1622 repr( hosts[ controller ] ) )
1623 utilities.assert_equals(
1624 expect=main.TRUE,
1625 actual=consistentHostsResult,
1626 onpass="Hosts view is consistent across all ONOS nodes",
1627 onfail="ONOS nodes have different views of hosts" )
1628
1629 main.step( "Each host has an IP address" )
1630 ipResult = main.TRUE
1631 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001632 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001633 if hosts[ controller ]:
1634 for host in hosts[ controller ]:
1635 if not host.get( 'ipAddresses', [ ] ):
1636 main.log.error( "Error with host ips on controller" +
1637 controllerStr + ": " + str( host ) )
1638 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001639 utilities.assert_equals(
1640 expect=main.TRUE,
1641 actual=ipResult,
1642 onpass="The ips of the hosts aren't empty",
1643 onfail="The ip of at least one host is missing" )
1644
1645 # Strongly connected clusters of devices
1646 main.step( "Cluster view is consistent across ONOS nodes" )
1647 consistentClustersResult = main.TRUE
1648 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07001649 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001650 if "Error" not in clusters[ controller ]:
1651 if clusters[ controller ] == clusters[ 0 ]:
1652 continue
1653 else: # clusters not consistent
1654 main.log.error( "clusters from ONOS" + controllerStr +
1655 " is inconsistent with ONOS1" )
1656 consistentClustersResult = main.FALSE
1657
1658 else:
1659 main.log.error( "Error in getting dataplane clusters " +
1660 "from ONOS" + controllerStr )
1661 consistentClustersResult = main.FALSE
1662 main.log.warn( "ONOS" + controllerStr +
1663 " clusters response: " +
1664 repr( clusters[ controller ] ) )
1665 utilities.assert_equals(
1666 expect=main.TRUE,
1667 actual=consistentClustersResult,
1668 onpass="Clusters view is consistent across all ONOS nodes",
1669 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001670 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001671 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001672
Jon Hall5cf14d52015-07-16 12:15:19 -07001673 # there should always only be one cluster
1674 main.step( "Cluster view correct across ONOS nodes" )
1675 try:
1676 numClusters = len( json.loads( clusters[ 0 ] ) )
1677 except ( ValueError, TypeError ):
1678 main.log.exception( "Error parsing clusters[0]: " +
1679 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001680 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001681 clusterResults = main.FALSE
1682 if numClusters == 1:
1683 clusterResults = main.TRUE
1684 utilities.assert_equals(
1685 expect=1,
1686 actual=numClusters,
1687 onpass="ONOS shows 1 SCC",
1688 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1689
1690 main.step( "Comparing ONOS topology to MN" )
1691 devicesResults = main.TRUE
1692 linksResults = main.TRUE
1693 hostsResults = main.TRUE
1694 mnSwitches = main.Mininet1.getSwitches()
1695 mnLinks = main.Mininet1.getLinks()
1696 mnHosts = main.Mininet1.getHosts()
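        # Use the topology reported by Mininet as the reference and check each
        # controller's devices, links, and hosts against it.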
Jon Halla440e872016-03-31 15:15:50 -07001697 for controller in range( len( main.activeNodes ) ):
1698 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001699 if devices[ controller ] and ports[ controller ] and\
1700 "Error" not in devices[ controller ] and\
1701 "Error" not in ports[ controller ]:
Jon Halla440e872016-03-31 15:15:50 -07001702 currentDevicesResult = main.Mininet1.compareSwitches(
1703 mnSwitches,
1704 json.loads( devices[ controller ] ),
1705 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001706 else:
1707 currentDevicesResult = main.FALSE
1708 utilities.assert_equals( expect=main.TRUE,
1709 actual=currentDevicesResult,
1710 onpass="ONOS" + controllerStr +
1711 " Switches view is correct",
1712 onfail="ONOS" + controllerStr +
1713 " Switches view is incorrect" )
1714 if links[ controller ] and "Error" not in links[ controller ]:
1715 currentLinksResult = main.Mininet1.compareLinks(
1716 mnSwitches, mnLinks,
1717 json.loads( links[ controller ] ) )
1718 else:
1719 currentLinksResult = main.FALSE
1720 utilities.assert_equals( expect=main.TRUE,
1721 actual=currentLinksResult,
1722 onpass="ONOS" + controllerStr +
1723 " links view is correct",
1724 onfail="ONOS" + controllerStr +
1725 " links view is incorrect" )
1726
Jon Hall657cdf62015-12-17 14:40:51 -08001727 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001728 currentHostsResult = main.Mininet1.compareHosts(
1729 mnHosts,
1730 hosts[ controller ] )
1731 else:
1732 currentHostsResult = main.FALSE
1733 utilities.assert_equals( expect=main.TRUE,
1734 actual=currentHostsResult,
1735 onpass="ONOS" + controllerStr +
1736 " hosts exist in Mininet",
1737 onfail="ONOS" + controllerStr +
1738 " hosts don't match Mininet" )
1739
1740 devicesResults = devicesResults and currentDevicesResult
1741 linksResults = linksResults and currentLinksResult
1742 hostsResults = hostsResults and currentHostsResult
1743
1744 main.step( "Device information is correct" )
1745 utilities.assert_equals(
1746 expect=main.TRUE,
1747 actual=devicesResults,
1748 onpass="Device information is correct",
1749 onfail="Device information is incorrect" )
1750
1751 main.step( "Links are correct" )
1752 utilities.assert_equals(
1753 expect=main.TRUE,
1754 actual=linksResults,
1755 onpass="Link are correct",
1756 onfail="Links are incorrect" )
1757
1758 main.step( "Hosts are correct" )
1759 utilities.assert_equals(
1760 expect=main.TRUE,
1761 actual=hostsResults,
1762 onpass="Hosts are correct",
1763 onfail="Hosts are incorrect" )
1764
1765 def CASE6( self, main ):
1766 """
1767 The Failure case. Since this is the Sanity test, we do nothing.
1768 """
1769 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001770 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001771 assert main, "main not defined"
1772 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001773 assert main.CLIs, "main.CLIs not defined"
1774 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001775 main.case( "Wait 60 seconds instead of inducing a failure" )
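        # The sanity variant induces no failure here; the sleep just keeps the case
        # flow aligned with the failure-inducing HA tests so CASE7 runs the same checks.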
1776 time.sleep( 60 )
1777 utilities.assert_equals(
1778 expect=main.TRUE,
1779 actual=main.TRUE,
1780 onpass="Sleeping 60 seconds",
1781 onfail="Something is terribly wrong with my math" )
1782
1783 def CASE7( self, main ):
1784 """
1785 Check state after ONOS failure
1786 """
1787 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001788 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001789 assert main, "main not defined"
1790 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001791 assert main.CLIs, "main.CLIs not defined"
1792 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001793 main.case( "Running ONOS Constant State Tests" )
1794
1795 main.step( "Check that each switch has a master" )
1796 # Assert that each device has a master
1797 rolesNotNull = main.TRUE
1798 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001799 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001800 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 name="rolesNotNull-" + str( i ),
1802 args=[ ] )
1803 threads.append( t )
1804 t.start()
1805
1806 for t in threads:
1807 t.join()
1808 rolesNotNull = rolesNotNull and t.result
1809 utilities.assert_equals(
1810 expect=main.TRUE,
1811 actual=rolesNotNull,
1812 onpass="Each device has a master",
1813 onfail="Some devices don't have a master assigned" )
1814
1815 main.step( "Read device roles from ONOS" )
1816 ONOSMastership = []
1817 mastershipCheck = main.FALSE
1818 consistentMastership = True
1819 rolesResults = True
1820 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001821 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001822 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001823 name="roles-" + str( i ),
1824 args=[] )
1825 threads.append( t )
1826 t.start()
1827
1828 for t in threads:
1829 t.join()
1830 ONOSMastership.append( t.result )
1831
Jon Halla440e872016-03-31 15:15:50 -07001832 for i in range( len( ONOSMastership ) ):
1833 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001834 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001835 main.log.error( "Error in getting ONOS" + node + " roles" )
1836 main.log.warn( "ONOS" + node + " mastership response: " +
1837 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001838 rolesResults = False
1839 utilities.assert_equals(
1840 expect=True,
1841 actual=rolesResults,
1842 onpass="No error in reading roles output",
1843 onfail="Error in reading roles from ONOS" )
1844
1845 main.step( "Check for consistency in roles from each controller" )
1846 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1847 main.log.info(
1848 "Switch roles are consistent across all ONOS nodes" )
1849 else:
1850 consistentMastership = False
1851 utilities.assert_equals(
1852 expect=True,
1853 actual=consistentMastership,
1854 onpass="Switch roles are consistent across all ONOS nodes",
1855 onfail="ONOS nodes have different views of switch roles" )
1856
1857 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001858 for i in range( len( ONOSMastership ) ):
1859 node = str( main.activeNodes[i] + 1 )
 1860 main.log.warn( "ONOS" + node + " roles: " +
1861 json.dumps( json.loads( ONOSMastership[ i ] ),
1862 sort_keys=True,
1863 indent=4,
1864 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001865
1866 description2 = "Compare switch roles from before failure"
1867 main.step( description2 )
1868 try:
1869 currentJson = json.loads( ONOSMastership[0] )
1870 oldJson = json.loads( mastershipState )
1871 except ( ValueError, TypeError ):
1872 main.log.exception( "Something is wrong with parsing " +
1873 "ONOSMastership[0] or mastershipState" )
1874 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1875 main.log.error( "mastershipState" + repr( mastershipState ) )
1876 main.cleanup()
1877 main.exit()
1878 mastershipCheck = main.TRUE
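        # Compare the current master of each switch (looked up by DPID) with the
        # roles saved in CASE5; nothing should have moved in the sanity run.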
1879 for i in range( 1, 29 ):
1880 switchDPID = str(
1881 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1882 current = [ switch[ 'master' ] for switch in currentJson
1883 if switchDPID in switch[ 'id' ] ]
1884 old = [ switch[ 'master' ] for switch in oldJson
1885 if switchDPID in switch[ 'id' ] ]
1886 if current == old:
1887 mastershipCheck = mastershipCheck and main.TRUE
1888 else:
1889 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1890 mastershipCheck = main.FALSE
1891 utilities.assert_equals(
1892 expect=main.TRUE,
1893 actual=mastershipCheck,
1894 onpass="Mastership of Switches was not changed",
1895 onfail="Mastership of some switches changed" )
1896 mastershipCheck = mastershipCheck and consistentMastership
1897
1898 main.step( "Get the intents and compare across all nodes" )
1899 ONOSIntents = []
1900 intentCheck = main.FALSE
1901 consistentIntents = True
1902 intentsResults = True
1903 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001904 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001905 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001906 name="intents-" + str( i ),
1907 args=[],
1908 kwargs={ 'jsonFormat': True } )
1909 threads.append( t )
1910 t.start()
1911
1912 for t in threads:
1913 t.join()
1914 ONOSIntents.append( t.result )
1915
Jon Halla440e872016-03-31 15:15:50 -07001916 for i in range( len( ONOSIntents ) ):
1917 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001918 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001919 main.log.error( "Error in getting ONOS" + node + " intents" )
1920 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001921 repr( ONOSIntents[ i ] ) )
1922 intentsResults = False
1923 utilities.assert_equals(
1924 expect=True,
1925 actual=intentsResults,
1926 onpass="No error in reading intents output",
1927 onfail="Error in reading intents from ONOS" )
1928
1929 main.step( "Check for consistency in Intents from each controller" )
1930 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1931 main.log.info( "Intents are consistent across all ONOS " +
1932 "nodes" )
1933 else:
1934 consistentIntents = False
1935
1936 # Try to make it easy to figure out what is happening
1937 #
1938 # Intent ONOS1 ONOS2 ...
1939 # 0x01 INSTALLED INSTALLING
1940 # ... ... ...
1941 # ... ... ...
1942 title = " ID"
Jon Halla440e872016-03-31 15:15:50 -07001943 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001944 title += " " * 10 + "ONOS" + str( n + 1 )
1945 main.log.warn( title )
1946 # get all intent keys in the cluster
1947 keys = []
1948 for nodeStr in ONOSIntents:
1949 node = json.loads( nodeStr )
1950 for intent in node:
1951 keys.append( intent.get( 'id' ) )
1952 keys = set( keys )
1953 for key in keys:
1954 row = "%-13s" % key
1955 for nodeStr in ONOSIntents:
1956 node = json.loads( nodeStr )
1957 for intent in node:
1958 if intent.get( 'id' ) == key:
1959 row += "%-15s" % intent.get( 'state' )
1960 main.log.warn( row )
1961 # End table view
1962
1963 utilities.assert_equals(
1964 expect=True,
1965 actual=consistentIntents,
1966 onpass="Intents are consistent across all ONOS nodes",
1967 onfail="ONOS nodes have different views of intents" )
1968 intentStates = []
1969 for node in ONOSIntents: # Iter through ONOS nodes
1970 nodeStates = []
1971 # Iter through intents of a node
1972 try:
1973 for intent in json.loads( node ):
1974 nodeStates.append( intent[ 'state' ] )
1975 except ( ValueError, TypeError ):
1976 main.log.exception( "Error in parsing intents" )
1977 main.log.error( repr( node ) )
1978 intentStates.append( nodeStates )
1979 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1980 main.log.info( dict( out ) )
1981
1982 if intentsResults and not consistentIntents:
Jon Halla440e872016-03-31 15:15:50 -07001983 for i in range( len( main.activeNodes ) ):
1984 node = str( main.activeNodes[i] + 1 )
1985 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001986 main.log.warn( json.dumps(
1987 json.loads( ONOSIntents[ i ] ),
1988 sort_keys=True,
1989 indent=4,
1990 separators=( ',', ': ' ) ) )
1991 elif intentsResults and consistentIntents:
1992 intentCheck = main.TRUE
1993
1994 # NOTE: Store has no durability, so intents are lost across system
1995 # restarts
1996 main.step( "Compare current intents with intents before the failure" )
1997 # NOTE: this requires case 5 to pass for intentState to be set.
1998 # maybe we should stop the test if that fails?
1999 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002000 try:
2001 intentState
2002 except NameError:
2003 main.log.warn( "No previous intent state was saved" )
2004 else:
2005 if intentState and intentState == ONOSIntents[ 0 ]:
2006 sameIntents = main.TRUE
2007 main.log.info( "Intents are consistent with before failure" )
2008 # TODO: possibly the states have changed? we may need to figure out
2009 # what the acceptable states are
2010 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2011 sameIntents = main.TRUE
2012 try:
2013 before = json.loads( intentState )
2014 after = json.loads( ONOSIntents[ 0 ] )
2015 for intent in before:
2016 if intent not in after:
2017 sameIntents = main.FALSE
2018 main.log.debug( "Intent is not currently in ONOS " +
2019 "(at least in the same form):" )
2020 main.log.debug( json.dumps( intent ) )
2021 except ( ValueError, TypeError ):
2022 main.log.exception( "Exception printing intents" )
2023 main.log.debug( repr( ONOSIntents[0] ) )
2024 main.log.debug( repr( intentState ) )
2025 if sameIntents == main.FALSE:
2026 try:
2027 main.log.debug( "ONOS intents before: " )
2028 main.log.debug( json.dumps( json.loads( intentState ),
2029 sort_keys=True, indent=4,
2030 separators=( ',', ': ' ) ) )
2031 main.log.debug( "Current ONOS intents: " )
2032 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2033 sort_keys=True, indent=4,
2034 separators=( ',', ': ' ) ) )
2035 except ( ValueError, TypeError ):
2036 main.log.exception( "Exception printing intents" )
2037 main.log.debug( repr( ONOSIntents[0] ) )
2038 main.log.debug( repr( intentState ) )
2039 utilities.assert_equals(
2040 expect=main.TRUE,
2041 actual=sameIntents,
2042 onpass="Intents are consistent with before failure",
2043 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002044 intentCheck = intentCheck and sameIntents
2045
2046 main.step( "Get the OF Table entries and compare to before " +
2047 "component failure" )
2048 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002049 for i in range( 28 ):
2050 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002051 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002052 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2053 FlowTables = FlowTables and curSwitch
2054 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002055 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002056 utilities.assert_equals(
2057 expect=main.TRUE,
2058 actual=FlowTables,
2059 onpass="No changes were found in the flow tables",
2060 onfail="Changes were found in the flow tables" )
2061
2062 main.Mininet2.pingLongKill()
2063 '''
2064 main.step( "Check the continuous pings to ensure that no packets " +
2065 "were dropped during component failure" )
2066 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2067 main.params[ 'TESTONIP' ] )
2068 LossInPings = main.FALSE
2069 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2070 for i in range( 8, 18 ):
2071 main.log.info(
2072 "Checking for a loss in pings along flow from s" +
2073 str( i ) )
2074 LossInPings = main.Mininet2.checkForLoss(
2075 "/tmp/ping.h" +
2076 str( i ) ) or LossInPings
2077 if LossInPings == main.TRUE:
2078 main.log.info( "Loss in ping detected" )
2079 elif LossInPings == main.ERROR:
2080 main.log.info( "There are multiple mininet process running" )
2081 elif LossInPings == main.FALSE:
2082 main.log.info( "No Loss in the pings" )
2083 main.log.info( "No loss of dataplane connectivity" )
2084 utilities.assert_equals(
2085 expect=main.FALSE,
2086 actual=LossInPings,
2087 onpass="No Loss of connectivity",
2088 onfail="Loss of dataplane connectivity detected" )
2089 '''
2090
2091 main.step( "Leadership Election is still functional" )
2092 # Test of LeadershipElection
Jon Halla440e872016-03-31 15:15:50 -07002093 leaderList = []
2094
Jon Hall5cf14d52015-07-16 12:15:19 -07002095 # NOTE: this only works for the sanity test. In case of failures,
2096 # leader will likely change
Jon Halla440e872016-03-31 15:15:50 -07002097 leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
Jon Hall5cf14d52015-07-16 12:15:19 -07002098 leaderResult = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07002099
2100 for i in main.activeNodes:
2101 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002102 leaderN = cli.electionTestLeader()
Jon Halla440e872016-03-31 15:15:50 -07002103 leaderList.append( leaderN )
Jon Hall5cf14d52015-07-16 12:15:19 -07002104 # verify leader is ONOS1
2105 if leaderN == leader:
2106 # all is well
2107 # NOTE: In failure scenario, this could be a new node, maybe
2108 # check != ONOS1
2109 pass
2110 elif leaderN == main.FALSE:
2111 # error in response
2112 main.log.error( "Something is wrong with " +
2113 "electionTestLeader function, check the" +
2114 " error logs" )
2115 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002116 elif leaderN is None:
2117 main.log.error( cli.name +
2118 " shows no leader for the election-app was" +
2119 " elected after the old one died" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002120 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002121 if len( set( leaderList ) ) != 1:
2122 leaderResult = main.FALSE
2123 main.log.error(
2124 "Inconsistent view of leader for the election test app" )
2125 # TODO: print the list
Jon Hall5cf14d52015-07-16 12:15:19 -07002126 utilities.assert_equals(
2127 expect=main.TRUE,
2128 actual=leaderResult,
2129 onpass="Leadership election passed",
2130 onfail="Something went wrong with Leadership election" )
2131
2132 def CASE8( self, main ):
2133 """
2134 Compare topo
2135 """
2136 import json
2137 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002138 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002139 assert main, "main not defined"
2140 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002141 assert main.CLIs, "main.CLIs not defined"
2142 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002143
2144 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002145 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002146 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002147 topoResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002148 topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002149 elapsed = 0
2150 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002151 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 startTime = time.time()
2153 # Give time for Gossip to work
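        # Keep re-reading the topology until it matches Mininet, or until both
        # 60 seconds have elapsed and at least three attempts have been made.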
Jon Halle9b1fa32015-12-08 15:32:21 -08002154 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002155 devicesResults = main.TRUE
2156 linksResults = main.TRUE
2157 hostsResults = main.TRUE
2158 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002159 count += 1
2160 cliStart = time.time()
2161 devices = []
2162 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002163 for i in main.activeNodes:
2164 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002165 name="devices-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002166 args=[ main.CLIs[i].devices, [ None ] ],
2167 kwargs= { 'sleep': 5, 'attempts': 5,
2168 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002169 threads.append( t )
2170 t.start()
2171
2172 for t in threads:
2173 t.join()
2174 devices.append( t.result )
2175 hosts = []
2176 ipResult = main.TRUE
2177 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002178 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002179 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002180 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002181 args=[ main.CLIs[i].hosts, [ None ] ],
2182 kwargs= { 'sleep': 5, 'attempts': 5,
2183 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002184 threads.append( t )
2185 t.start()
2186
2187 for t in threads:
2188 t.join()
2189 try:
2190 hosts.append( json.loads( t.result ) )
2191 except ( ValueError, TypeError ):
2192 main.log.exception( "Error parsing hosts results" )
2193 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002194 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002195 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002196 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002197 if hosts[ controller ]:
2198 for host in hosts[ controller ]:
2199 if host is None or host.get( 'ipAddresses', [] ) == []:
2200 main.log.error(
2201 "Error with host ipAddresses on controller" +
2202 controllerStr + ": " + str( host ) )
2203 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002204 ports = []
2205 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002206 for i in main.activeNodes:
2207 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002208 name="ports-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002209 args=[ main.CLIs[i].ports, [ None ] ],
2210 kwargs= { 'sleep': 5, 'attempts': 5,
2211 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002212 threads.append( t )
2213 t.start()
2214
2215 for t in threads:
2216 t.join()
2217 ports.append( t.result )
2218 links = []
2219 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002220 for i in main.activeNodes:
2221 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002222 name="links-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002223 args=[ main.CLIs[i].links, [ None ] ],
2224 kwargs= { 'sleep': 5, 'attempts': 5,
2225 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002226 threads.append( t )
2227 t.start()
2228
2229 for t in threads:
2230 t.join()
2231 links.append( t.result )
2232 clusters = []
2233 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002234 for i in main.activeNodes:
2235 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002236 name="clusters-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002237 args=[ main.CLIs[i].clusters, [ None ] ],
2238 kwargs= { 'sleep': 5, 'attempts': 5,
2239 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002240 threads.append( t )
2241 t.start()
2242
2243 for t in threads:
2244 t.join()
2245 clusters.append( t.result )
2246
2247 elapsed = time.time() - startTime
2248 cliTime = time.time() - cliStart
2249 print "Elapsed time: " + str( elapsed )
2250 print "CLI time: " + str( cliTime )
2251
Jon Halla440e872016-03-31 15:15:50 -07002252 if all( e is None for e in devices ) and\
2253 all( e is None for e in hosts ) and\
2254 all( e is None for e in ports ) and\
2255 all( e is None for e in links ) and\
2256 all( e is None for e in clusters ):
2257 topoFailMsg = "Could not get topology from ONOS"
2258 main.log.error( topoFailMsg )
2259 continue # Try again, No use trying to compare
2260
Jon Hall5cf14d52015-07-16 12:15:19 -07002261 mnSwitches = main.Mininet1.getSwitches()
2262 mnLinks = main.Mininet1.getLinks()
2263 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07002264 for controller in range( len( main.activeNodes ) ):
2265 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002266 if devices[ controller ] and ports[ controller ] and\
2267 "Error" not in devices[ controller ] and\
2268 "Error" not in ports[ controller ]:
2269
Jon Hallc6793552016-01-19 14:18:37 -08002270 try:
2271 currentDevicesResult = main.Mininet1.compareSwitches(
2272 mnSwitches,
2273 json.loads( devices[ controller ] ),
2274 json.loads( ports[ controller ] ) )
2275 except ( TypeError, ValueError ) as e:
2276 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2277 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002278 else:
2279 currentDevicesResult = main.FALSE
2280 utilities.assert_equals( expect=main.TRUE,
2281 actual=currentDevicesResult,
2282 onpass="ONOS" + controllerStr +
2283 " Switches view is correct",
2284 onfail="ONOS" + controllerStr +
2285 " Switches view is incorrect" )
2286
2287 if links[ controller ] and "Error" not in links[ controller ]:
2288 currentLinksResult = main.Mininet1.compareLinks(
2289 mnSwitches, mnLinks,
2290 json.loads( links[ controller ] ) )
2291 else:
2292 currentLinksResult = main.FALSE
2293 utilities.assert_equals( expect=main.TRUE,
2294 actual=currentLinksResult,
2295 onpass="ONOS" + controllerStr +
2296 " links view is correct",
2297 onfail="ONOS" + controllerStr +
2298 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002299 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002300 currentHostsResult = main.Mininet1.compareHosts(
2301 mnHosts,
2302 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002303 elif hosts[ controller ] == []:
2304 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002305 else:
2306 currentHostsResult = main.FALSE
2307 utilities.assert_equals( expect=main.TRUE,
2308 actual=currentHostsResult,
2309 onpass="ONOS" + controllerStr +
2310 " hosts exist in Mininet",
2311 onfail="ONOS" + controllerStr +
2312 " hosts don't match Mininet" )
2313 # CHECKING HOST ATTACHMENT POINTS
2314 hostAttachment = True
2315 zeroHosts = False
2316 # FIXME: topo-HA/obelisk specific mappings:
2317 # key is mac and value is dpid
2318 mappings = {}
2319 for i in range( 1, 29 ): # hosts 1 through 28
2320 # set up correct variables:
2321 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2322 if i == 1:
2323 deviceId = "1000".zfill(16)
2324 elif i == 2:
2325 deviceId = "2000".zfill(16)
2326 elif i == 3:
2327 deviceId = "3000".zfill(16)
2328 elif i == 4:
2329 deviceId = "3004".zfill(16)
2330 elif i == 5:
2331 deviceId = "5000".zfill(16)
2332 elif i == 6:
2333 deviceId = "6000".zfill(16)
2334 elif i == 7:
2335 deviceId = "6007".zfill(16)
2336 elif i >= 8 and i <= 17:
2337 dpid = '3' + str( i ).zfill( 3 )
2338 deviceId = dpid.zfill(16)
2339 elif i >= 18 and i <= 27:
2340 dpid = '6' + str( i ).zfill( 3 )
2341 deviceId = dpid.zfill(16)
2342 elif i == 28:
2343 deviceId = "2800".zfill(16)
2344 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002345 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002346 if hosts[ controller ] == []:
2347 main.log.warn( "There are no hosts discovered" )
2348 zeroHosts = True
2349 else:
2350 for host in hosts[ controller ]:
2351 mac = None
2352 location = None
2353 device = None
2354 port = None
2355 try:
2356 mac = host.get( 'mac' )
2357 assert mac, "mac field could not be found for this host object"
2358
2359 location = host.get( 'location' )
2360 assert location, "location field could not be found for this host object"
2361
2362 # Trim the protocol identifier off deviceId
2363 device = str( location.get( 'elementId' ) ).split(':')[1]
2364 assert device, "elementId field could not be found for this host location object"
2365
2366 port = location.get( 'port' )
2367 assert port, "port field could not be found for this host location object"
2368
2369 # Now check if this matches where they should be
2370 if mac and device and port:
2371 if str( port ) != "1":
2372 main.log.error( "The attachment port is incorrect for " +
2373 "host " + str( mac ) +
2374 ". Expected: 1 Actual: " + str( port) )
2375 hostAttachment = False
2376 if device != mappings[ str( mac ) ]:
2377 main.log.error( "The attachment device is incorrect for " +
2378 "host " + str( mac ) +
2379 ". Expected: " + mappings[ str( mac ) ] +
2380 " Actual: " + device )
2381 hostAttachment = False
2382 else:
2383 hostAttachment = False
2384 except AssertionError:
2385 main.log.exception( "Json object not as expected" )
2386 main.log.error( repr( host ) )
2387 hostAttachment = False
2388 else:
2389 main.log.error( "No hosts json output or \"Error\"" +
2390 " in output. hosts = " +
2391 repr( hosts[ controller ] ) )
2392 if zeroHosts is False:
2393 hostAttachment = True
2394
2395 # END CHECKING HOST ATTACHMENT POINTS
2396 devicesResults = devicesResults and currentDevicesResult
2397 linksResults = linksResults and currentLinksResult
2398 hostsResults = hostsResults and currentHostsResult
2399 hostAttachmentResults = hostAttachmentResults and\
2400 hostAttachment
2401 topoResult = ( devicesResults and linksResults
2402 and hostsResults and ipResult and
2403 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002404 utilities.assert_equals( expect=True,
2405 actual=topoResult,
2406 onpass="ONOS topology matches Mininet",
Jon Halla440e872016-03-31 15:15:50 -07002407 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002408 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002409
2410 # Compare json objects for hosts and dataplane clusters
2411
2412 # hosts
2413 main.step( "Hosts view is consistent across all ONOS nodes" )
2414 consistentHostsResult = main.TRUE
2415 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002416 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002417 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002418 if hosts[ controller ] == hosts[ 0 ]:
2419 continue
2420 else: # hosts not consistent
2421 main.log.error( "hosts from ONOS" + controllerStr +
2422 " is inconsistent with ONOS1" )
2423 main.log.warn( repr( hosts[ controller ] ) )
2424 consistentHostsResult = main.FALSE
2425
2426 else:
2427 main.log.error( "Error in getting ONOS hosts from ONOS" +
2428 controllerStr )
2429 consistentHostsResult = main.FALSE
2430 main.log.warn( "ONOS" + controllerStr +
2431 " hosts response: " +
2432 repr( hosts[ controller ] ) )
2433 utilities.assert_equals(
2434 expect=main.TRUE,
2435 actual=consistentHostsResult,
2436 onpass="Hosts view is consistent across all ONOS nodes",
2437 onfail="ONOS nodes have different views of hosts" )
2438
2439 main.step( "Hosts information is correct" )
2440 hostsResults = hostsResults and ipResult
2441 utilities.assert_equals(
2442 expect=main.TRUE,
2443 actual=hostsResults,
2444 onpass="Host information is correct",
2445 onfail="Host information is incorrect" )
2446
2447 main.step( "Host attachment points to the network" )
2448 utilities.assert_equals(
2449 expect=True,
2450 actual=hostAttachmentResults,
2451 onpass="Hosts are correctly attached to the network",
2452 onfail="ONOS did not correctly attach hosts to the network" )
2453
2454 # Strongly connected clusters of devices
2455 main.step( "Clusters view is consistent across all ONOS nodes" )
2456 consistentClustersResult = main.TRUE
2457 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07002458 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002459 if "Error" not in clusters[ controller ]:
2460 if clusters[ controller ] == clusters[ 0 ]:
2461 continue
2462 else: # clusters not consistent
2463 main.log.error( "clusters from ONOS" +
2464 controllerStr +
2465 " is inconsistent with ONOS1" )
2466 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002467 else:
2468 main.log.error( "Error in getting dataplane clusters " +
2469 "from ONOS" + controllerStr )
2470 consistentClustersResult = main.FALSE
2471 main.log.warn( "ONOS" + controllerStr +
2472 " clusters response: " +
2473 repr( clusters[ controller ] ) )
2474 utilities.assert_equals(
2475 expect=main.TRUE,
2476 actual=consistentClustersResult,
2477 onpass="Clusters view is consistent across all ONOS nodes",
2478 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002479 if not consistentClustersResult:
2480 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07002481
2482 main.step( "There is only one SCC" )
2483 # there should always only be one cluster
2484 try:
2485 numClusters = len( json.loads( clusters[ 0 ] ) )
2486 except ( ValueError, TypeError ):
2487 main.log.exception( "Error parsing clusters[0]: " +
2488 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002489 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002490 clusterResults = main.FALSE
2491 if numClusters == 1:
2492 clusterResults = main.TRUE
2493 utilities.assert_equals(
2494 expect=1,
2495 actual=numClusters,
2496 onpass="ONOS shows 1 SCC",
2497 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2498
2499 topoResult = ( devicesResults and linksResults
2500 and hostsResults and consistentHostsResult
2501 and consistentClustersResult and clusterResults
2502 and ipResult and hostAttachmentResults )
2503
2504 topoResult = topoResult and int( count <= 2 )
2505 note = "note it takes about " + str( int( cliTime ) ) + \
2506 " seconds for the test to make all the cli calls to fetch " +\
2507 "the topology from each ONOS instance"
2508 main.log.info(
2509 "Very crass estimate for topology discovery/convergence( " +
2510 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2511 str( count ) + " tries" )
2512
2513 main.step( "Device information is correct" )
2514 utilities.assert_equals(
2515 expect=main.TRUE,
2516 actual=devicesResults,
2517 onpass="Device information is correct",
2518 onfail="Device information is incorrect" )
2519
2520 main.step( "Links are correct" )
2521 utilities.assert_equals(
2522 expect=main.TRUE,
2523 actual=linksResults,
2524 onpass="Link are correct",
2525 onfail="Links are incorrect" )
2526
2527 main.step( "Hosts are correct" )
2528 utilities.assert_equals(
2529 expect=main.TRUE,
2530 actual=hostsResults,
2531 onpass="Hosts are correct",
2532 onfail="Hosts are incorrect" )
2533
2534 # FIXME: move this to an ONOS state case
2535 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002536 nodeResults = utilities.retry( main.HA.nodesCheck,
2537 False,
2538 args=[main.activeNodes],
2539 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002540
Jon Hall41d39f12016-04-11 22:54:35 -07002541 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002542 onpass="Nodes check successful",
2543 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002544 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002545 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002546 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002547 main.CLIs[i].name,
2548 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002549
2550 def CASE9( self, main ):
2551 """
2552 Link s3-s28 down
2553 """
2554 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002555 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002556 assert main, "main not defined"
2557 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002558 assert main.CLIs, "main.CLIs not defined"
2559 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002560 # NOTE: You should probably run a topology check after this
2561
2562 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
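        # linkSleep comes from the params file and should cover ONOS's link
        # discovery delay so the event is visible before the check below.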
2563
2564 description = "Turn off a link to ensure that Link Discovery " +\
2565 "is working properly"
2566 main.case( description )
2567
2568 main.step( "Kill Link between s3 and s28" )
2569 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2570 main.log.info( "Waiting " + str( linkSleep ) +
2571 " seconds for link down to be discovered" )
2572 time.sleep( linkSleep )
2573 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2574 onpass="Link down successful",
2575 onfail="Failed to bring link down" )
2576 # TODO do some sort of check here
2577
2578 def CASE10( self, main ):
2579 """
2580 Link s3-s28 up
2581 """
2582 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002583 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002584 assert main, "main not defined"
2585 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002586 assert main.CLIs, "main.CLIs not defined"
2587 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002588 # NOTE: You should probably run a topology check after this
2589
2590 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2591
2592 description = "Restore a link to ensure that Link Discovery is " + \
2593 "working properly"
2594 main.case( description )
2595
2596 main.step( "Bring link between s3 and s28 back up" )
2597 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2598 main.log.info( "Waiting " + str( linkSleep ) +
2599 " seconds for link up to be discovered" )
2600 time.sleep( linkSleep )
2601 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2602 onpass="Link up successful",
2603 onfail="Failed to bring link up" )
2604 # TODO do some sort of check here
2605
2606 def CASE11( self, main ):
2607 """
2608 Switch Down
2609 """
2610 # NOTE: You should probably run a topology check after this
2611 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002612 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002613 assert main, "main not defined"
2614 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002615 assert main.CLIs, "main.CLIs not defined"
2616 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002617
2618 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2619
2620 description = "Killing a switch to ensure it is discovered correctly"
Jon Halla440e872016-03-31 15:15:50 -07002621 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002622 main.case( description )
2623 switch = main.params[ 'kill' ][ 'switch' ]
2624 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2625
2626 # TODO: Make this switch parameterizable
2627 main.step( "Kill " + switch )
2628 main.log.info( "Deleting " + switch )
2629 main.Mininet1.delSwitch( switch )
2630 main.log.info( "Waiting " + str( switchSleep ) +
2631 " seconds for switch down to be discovered" )
2632 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002633 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002634 # Peek at the deleted switch
2635 main.log.warn( str( device ) )
2636 result = main.FALSE
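        # ONOS should keep the removed switch in its device store but mark it as
        # available=False once the connection drops.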
2637 if device and device[ 'available' ] is False:
2638 result = main.TRUE
2639 utilities.assert_equals( expect=main.TRUE, actual=result,
2640 onpass="Kill switch successful",
2641 onfail="Failed to kill switch?" )
2642
2643 def CASE12( self, main ):
2644 """
2645 Switch Up
2646 """
2647 # NOTE: You should probably run a topology check after this
2648 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002649 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002650 assert main, "main not defined"
2651 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002652 assert main.CLIs, "main.CLIs not defined"
2653 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002654 assert ONOS1Port, "ONOS1Port not defined"
2655 assert ONOS2Port, "ONOS2Port not defined"
2656 assert ONOS3Port, "ONOS3Port not defined"
2657 assert ONOS4Port, "ONOS4Port not defined"
2658 assert ONOS5Port, "ONOS5Port not defined"
2659 assert ONOS6Port, "ONOS6Port not defined"
2660 assert ONOS7Port, "ONOS7Port not defined"
2661
2662 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2663 switch = main.params[ 'kill' ][ 'switch' ]
2664 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2665 links = main.params[ 'kill' ][ 'links' ].split()
Jon Halla440e872016-03-31 15:15:50 -07002666 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002667 description = "Adding a switch to ensure it is discovered correctly"
2668 main.case( description )
2669
2670 main.step( "Add back " + switch )
2671 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2672 for peer in links:
2673 main.Mininet1.addLink( switch, peer )
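        # The re-added switch has no controller configured yet, so point it at
        # every ONOS node again before waiting for discovery.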
Jon Halla440e872016-03-31 15:15:50 -07002674 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002675 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2676 main.log.info( "Waiting " + str( switchSleep ) +
2677 " seconds for switch up to be discovered" )
2678 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002679 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002680 # Peek at the deleted switch
2681 main.log.warn( str( device ) )
2682 result = main.FALSE
2683 if device and device[ 'available' ]:
2684 result = main.TRUE
2685 utilities.assert_equals( expect=main.TRUE, actual=result,
2686 onpass="add switch successful",
2687 onfail="Failed to add switch?" )
2688
2689 def CASE13( self, main ):
2690 """
2691 Clean up
2692 """
2693 import os
2694 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002695 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002696 assert main, "main not defined"
2697 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002698 assert main.CLIs, "main.CLIs not defined"
2699 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002700
2701 # printing colors to terminal
2702 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2703 'blue': '\033[94m', 'green': '\033[92m',
2704 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2705 main.case( "Test Cleanup" )
2706 main.step( "Killing tcpdumps" )
2707 main.Mininet2.stopTcpdump()
2708
2709 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002710 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002711 main.step( "Copying MN pcap and ONOS log files to test station" )
2712 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2713 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002714 # NOTE: MN Pcap file is being saved to logdir.
2715 # We scp this file as MN and TestON aren't necessarily the same vm
2716
2717 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002718 # TODO: Load these from params
2719 # NOTE: must end in /
2720 logFolder = "/opt/onos/log/"
2721 logFiles = [ "karaf.log", "karaf.log.1" ]
2722 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002723 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002724 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002725 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002726 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2727 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002728 # std*.log's
2729 # NOTE: must end in /
2730 logFolder = "/opt/onos/var/"
2731 logFiles = [ "stderr.log", "stdout.log" ]
2732 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002733 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002734 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002735 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002736 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2737 logFolder + f, dstName )
2738 else:
2739 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002740
2741 main.step( "Stopping Mininet" )
2742 mnResult = main.Mininet1.stopNet()
2743 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2744 onpass="Mininet stopped",
2745 onfail="MN cleanup NOT successful" )
2746
2747 main.step( "Checking ONOS Logs for errors" )
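# NOTE: checkLogs() output is only logged as a warning here; this step
# does not assert on it, so log errors will not fail the test.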
Jon Halle1a3b752015-07-22 13:02:46 -07002748 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002749 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2750 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002751
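# NOTE: gossipTime is assumed to be set by an earlier intent test case;
# if that case did not run, the NameError below is caught and logged
# instead of failing the cleanup.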
2752 try:
2753 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2754 # Overwrite with empty line and close
2755 labels = "Gossip Intents"
2756 data = str( gossipTime )
2757 timerLog.write( labels + "\n" + data )
2758 timerLog.close()
2759 except NameError, e:
2760 main.log.exception( e )
2761
2762 def CASE14( self, main ):
2763 """
2764 start election app on all onos nodes
2765 """
Jon Halle1a3b752015-07-22 13:02:46 -07002766 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002767 assert main, "main not defined"
2768 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002769 assert main.CLIs, "main.CLIs not defined"
2770 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002771
2772 main.case( "Start Leadership Election app" )
2773 main.step( "Install leadership election app" )
Jon Halla440e872016-03-31 15:15:50 -07002774 onosCli = main.CLIs[ main.activeNodes[0] ]
2775 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002776 utilities.assert_equals(
2777 expect=main.TRUE,
2778 actual=appResult,
2779 onpass="Election app installed",
2780 onfail="Something went wrong with installing Leadership election" )
2781
2782 main.step( "Run for election on each node" )
Jon Halla440e872016-03-31 15:15:50 -07002783 for i in main.activeNodes:
2784 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002785 time.sleep( 5 )
2786 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
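# main.HA.consistentLeaderboards() is assumed to return a tuple of
# ( allMatch, boards ), where boards holds one leaderboard per queried
# node and boards[ 0 ][ 0 ] is the entry for the current leader (the
# leader's IP address appears in it).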
2787 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002788 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002789 expect=True,
2790 actual=sameResult,
2791 onpass="All nodes see the same leaderboards",
2792 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002793
Jon Hall25463a82016-04-13 14:03:52 -07002794 if sameResult:
2795 leader = leaders[ 0 ][ 0 ]
2796 if main.nodes[main.activeNodes[0]].ip_address in leader:
2797 correctLeader = True
2798 else:
2799 correctLeader = False
2800 main.step( "First node was elected leader" )
2801 utilities.assert_equals(
2802 expect=True,
2803 actual=correctLeader,
2804 onpass="Correct leader was elected",
2805 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002806
2807 def CASE15( self, main ):
2808 """
2809 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002810 15.1 Run election on each node
2811 15.2 Check that each node has the same leaders and candidates
2812 15.3 Find current leader and withdraw
2813 15.4 Check that a new node was elected leader
2814 15.5 Check that the new leader was a candidate of the old leader
2815 15.6 Run for election on old leader
2816 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2817 15.8 Make sure that the old leader was added to the candidate list
2818
2819 The "old" and "new" variable prefixes refer to data from before vs. after
2820 the withdrawal, and later to before vs. after the re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002821 """
2822 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002823 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002824 assert main, "main not defined"
2825 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002826 assert main.CLIs, "main.CLIs not defined"
2827 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002828
Jon Halla440e872016-03-31 15:15:50 -07002829 description = "Check that Leadership Election is still functional"
Jon Hall5cf14d52015-07-16 12:15:19 -07002830 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002831 # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002832
Jon Halla440e872016-03-31 15:15:50 -07002833 oldLeaders = [] # list of lists of each node's candidates before
2834 newLeaders = [] # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002835 oldLeader = '' # the old leader from oldLeaders, None if not same
2836 newLeader = '' # the new leader from newLeaders, None if not same
2837 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2838 expectNoLeader = False # True when there is only one leader
2839 if main.numCtrls == 1:
2840 expectNoLeader = True
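# With a single controller there is no other candidate to take over,
# so 'none' is the expected leader after the withdrawal below.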
2841
2842 main.step( "Run for election on each node" )
2843 electionResult = main.TRUE
2844
Jon Halla440e872016-03-31 15:15:50 -07002845 for i in main.activeNodes: # run test election on each node
2846 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002847 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002848 utilities.assert_equals(
2849 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002850 actual=electionResult,
2851 onpass="All nodes successfully ran for leadership",
2852 onfail="At least one node failed to run for leadership" )
2853
acsmars3a72bde2015-09-02 14:16:22 -07002854 if electionResult == main.FALSE:
2855 main.log.error(
Jon Halla440e872016-03-31 15:15:50 -07002856 "Skipping Test Case because Election Test App isn't loaded" )
acsmars3a72bde2015-09-02 14:16:22 -07002857 main.skipCase()
2858
acsmars71adceb2015-08-31 15:09:26 -07002859 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002860 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07002861 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07002862 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07002863 if sameResult:
2864 oldLeader = oldLeaders[ 0 ][ 0 ]
2865 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002866 else:
Jon Halla440e872016-03-31 15:15:50 -07002867 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002868 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002869 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002870 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002871 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002872 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002873
2874 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002875 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002876 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002877 if oldLeader is None:
2878 main.log.error( "Leadership isn't consistent." )
2879 withdrawResult = main.FALSE
2880 # Get the CLI of the oldLeader
Jon Halla440e872016-03-31 15:15:50 -07002881 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002882 if oldLeader == main.nodes[ i ].ip_address:
2883 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002884 break
2885 else: # FOR/ELSE: runs only if the loop finished without break
2886 main.log.error( "Leader election, could not find current leader" )
2887 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002888 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002889 utilities.assert_equals(
2890 expect=main.TRUE,
2891 actual=withdrawResult,
2892 onpass="Node was withdrawn from election",
2893 onfail="Node was not withdrawn from election" )
2894
acsmars71adceb2015-08-31 15:09:26 -07002895 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07002896 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07002897 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07002898 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07002899 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07002900 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07002901 if newLeaders[ 0 ][ 0 ] == 'none':
2902 main.log.error( "No leader was elected on at least 1 node" )
2903 if not expectNoLeader:
2904 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07002905 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07002906
2907 # Check that the new leader is not the older leader, which was withdrawn
2908 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07002909 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08002910 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07002911 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002912 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002913 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002914 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002915 onpass="Leadership election passed",
2916 onfail="Something went wrong with Leadership election" )
2917
Jon Halla440e872016-03-31 15:15:50 -07002918 main.step( "Check that the new leader was a candidate of the old leader" )
2919 # candidates[ 2 ] should become the top candidate after withdrawal
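# NOTE: the leaderboard format is assumed to be
# [ current leader, leader's own candidacy, next candidate, ... ],
# which is why index 2 is expected to win after the withdrawal.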
acsmars71adceb2015-08-31 15:09:26 -07002920 correctCandidateResult = main.TRUE
2921 if expectNoLeader:
2922 if newLeader == 'none':
2923 main.log.info( "No leader expected. None found. Pass" )
2924 correctCandidateResult = main.TRUE
2925 else:
2926 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2927 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002928 elif len( oldLeaders[0] ) >= 3:
2929 if newLeader == oldLeaders[ 0 ][ 2 ]:
2930 # correct leader was elected
2931 correctCandidateResult = main.TRUE
2932 else:
2933 correctCandidateResult = main.FALSE
2934 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
2935 newLeader, oldLeaders[ 0 ][ 2 ] ) )
2936 else:
2937 main.log.warn( "Could not determine who should be the correct leader" )
2938 main.log.debug( oldLeaders[ 0 ] )
acsmars71adceb2015-08-31 15:09:26 -07002939 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07002940 utilities.assert_equals(
2941 expect=main.TRUE,
2942 actual=correctCandidateResult,
2943 onpass="Correct Candidate Elected",
2944 onfail="Incorrect Candidate Elected" )
2945
Jon Hall5cf14d52015-07-16 12:15:19 -07002946 main.step( "Run for election on old leader ( just so everyone " +
2947 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002948 if oldLeaderCLI is not None:
2949 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002950 else:
acsmars71adceb2015-08-31 15:09:26 -07002951 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002952 runResult = main.FALSE
2953 utilities.assert_equals(
2954 expect=main.TRUE,
2955 actual=runResult,
2956 onpass="App re-ran for election",
2957 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07002958
acsmars71adceb2015-08-31 15:09:26 -07002959 main.step(
2960 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002961 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07002962 # Get new leaders and candidates
2963 reRunLeaders = []
2964 time.sleep( 5 ) # TODO: parameterize this sleep
Jon Hall41d39f12016-04-11 22:54:35 -07002965 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07002966
2967 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07002968 if not reRunLeaders[0]:
2969 positionResult = main.FALSE
2970 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07002971 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
2972 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07002973 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002974 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002975 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002976 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002977 onpass="Old leader successfully re-ran for election",
2978 onfail="Something went wrong with Leadership election after " +
2979 "the old leader re-ran for election" )
2980
2981 def CASE16( self, main ):
2982 """
2983 Install Distributed Primitives app
2984 """
2985 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002986 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002987 assert main, "main not defined"
2988 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002989 assert main.CLIs, "main.CLIs not defined"
2990 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002991
2992 # Variables for the distributed primitives tests
2993 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07002994 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07002995 global onosSet
2996 global onosSetName
2997 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07002998 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07002999 onosSet = set([])
3000 onosSetName = "TestON-set"
3001
3002 description = "Install Primitives app"
3003 main.case( description )
3004 main.step( "Install Primitives app" )
3005 appName = "org.onosproject.distributedprimitives"
Jon Halla440e872016-03-31 15:15:50 -07003006 node = main.activeNodes[0]
3007 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003008 utilities.assert_equals( expect=main.TRUE,
3009 actual=appResults,
3010 onpass="Primitives app activated",
3011 onfail="Primitives app not activated" )
3012 time.sleep( 5 ) # To allow all nodes to activate
3013
3014 def CASE17( self, main ):
3015 """
3016 Check for basic functionality with distributed primitives
3017 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003018 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003019 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003020 assert main, "main not defined"
3021 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003022 assert main.CLIs, "main.CLIs not defined"
3023 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003024 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003025 assert onosSetName, "onosSetName not defined"
3026 # NOTE: assert fails if value is 0/None/Empty/False
3027 try:
3028 pCounterValue
3029 except NameError:
3030 main.log.error( "pCounterValue not defined, setting to 0" )
3031 pCounterValue = 0
3032 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003033 onosSet
3034 except NameError:
3035 main.log.error( "onosSet not defined, setting to empty Set" )
3036 onosSet = set([])
3037 # Variables for the distributed primitives tests. These are local only
3038 addValue = "a"
3039 addAllValue = "a b c d e f"
3040 retainValue = "c d e f"
3041
3042 description = "Check for basic functionality with distributed " +\
3043 "primitives"
3044 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003045 main.caseExplanation = "Test the methods of the distributed " +\
3046 "primitives (counters and sets) through the CLI"
Jon Hall5cf14d52015-07-16 12:15:19 -07003047 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003048 # Partitioned counters
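# Pattern for the counter steps below: every active node updates the same
# distributed counter in its own thread while the local pCounterValue
# tracks the expected running total; the per-node return values are then
# checked against that locally computed sequence.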
3049 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003050 pCounters = []
3051 threads = []
3052 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003053 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003054 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3055 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003056 args=[ pCounterName ] )
3057 pCounterValue += 1
3058 addedPValues.append( pCounterValue )
3059 threads.append( t )
3060 t.start()
3061
3062 for t in threads:
3063 t.join()
3064 pCounters.append( t.result )
3065 # Check that counter incremented numController times
3066 pCounterResults = True
3067 for i in addedPValues:
3068 tmpResult = i in pCounters
3069 pCounterResults = pCounterResults and tmpResult
3070 if not tmpResult:
3071 main.log.error( str( i ) + " is not in partitioned "
3072 "counter incremented results" )
3073 utilities.assert_equals( expect=True,
3074 actual=pCounterResults,
3075 onpass="Default counter incremented",
3076 onfail="Error incrementing default" +
3077 " counter" )
3078
Jon Halle1a3b752015-07-22 13:02:46 -07003079 main.step( "Get then Increment a default counter on each node" )
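# get-then-add is expected to return the value before the increment, so
# the expected value is recorded before pCounterValue is bumped, the
# reverse of the add-then-get step above.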
3080 pCounters = []
3081 threads = []
3082 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003083 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003084 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3085 name="counterGetAndAdd-" + str( i ),
3086 args=[ pCounterName ] )
3087 addedPValues.append( pCounterValue )
3088 pCounterValue += 1
3089 threads.append( t )
3090 t.start()
3091
3092 for t in threads:
3093 t.join()
3094 pCounters.append( t.result )
3095 # Check that counter incremented numController times
3096 pCounterResults = True
3097 for i in addedPValues:
3098 tmpResult = i in pCounters
3099 pCounterResults = pCounterResults and tmpResult
3100 if not tmpResult:
3101 main.log.error( str( i ) + " is not in partitioned "
3102 "counter incremented results" )
3103 utilities.assert_equals( expect=True,
3104 actual=pCounterResults,
3105 onpass="Default counter incremented",
3106 onfail="Error incrementing default" +
3107 " counter" )
3108
3109 main.step( "Counters we added have the correct values" )
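# main.HA.counterCheck() is assumed to read the counter on each active
# node and compare it against the expected value passed in.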
Jon Hall41d39f12016-04-11 22:54:35 -07003110 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003111 utilities.assert_equals( expect=main.TRUE,
3112 actual=incrementCheck,
3113 onpass="Added counters are correct",
3114 onfail="Added counters are incorrect" )
3115
3116 main.step( "Add -8 to then get a default counter on each node" )
3117 pCounters = []
3118 threads = []
3119 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003120 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003121 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3122 name="counterIncrement-" + str( i ),
3123 args=[ pCounterName ],
3124 kwargs={ "delta": -8 } )
3125 pCounterValue += -8
3126 addedPValues.append( pCounterValue )
3127 threads.append( t )
3128 t.start()
3129
3130 for t in threads:
3131 t.join()
3132 pCounters.append( t.result )
3133 # Check that counter incremented numController times
3134 pCounterResults = True
3135 for i in addedPValues:
3136 tmpResult = i in pCounters
3137 pCounterResults = pCounterResults and tmpResult
3138 if not tmpResult:
3139 main.log.error( str( i ) + " is not in partitioned "
3140 "counter incremented results" )
3141 utilities.assert_equals( expect=True,
3142 actual=pCounterResults,
3143 onpass="Default counter incremented",
3144 onfail="Error incrementing default" +
3145 " counter" )
3146
3147 main.step( "Add 5 to then get a default counter on each node" )
3148 pCounters = []
3149 threads = []
3150 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003151 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003152 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3153 name="counterIncrement-" + str( i ),
3154 args=[ pCounterName ],
3155 kwargs={ "delta": 5 } )
3156 pCounterValue += 5
3157 addedPValues.append( pCounterValue )
3158 threads.append( t )
3159 t.start()
3160
3161 for t in threads:
3162 t.join()
3163 pCounters.append( t.result )
3164 # Check that counter incremented numController times
3165 pCounterResults = True
3166 for i in addedPValues:
3167 tmpResult = i in pCounters
3168 pCounterResults = pCounterResults and tmpResult
3169 if not tmpResult:
3170 main.log.error( str( i ) + " is not in partitioned "
3171 "counter incremented results" )
3172 utilities.assert_equals( expect=True,
3173 actual=pCounterResults,
3174 onpass="Default counter incremented",
3175 onfail="Error incrementing default" +
3176 " counter" )
3177
3178 main.step( "Get then add 5 to a default counter on each node" )
3179 pCounters = []
3180 threads = []
3181 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003182 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003183 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3184 name="counterIncrement-" + str( i ),
3185 args=[ pCounterName ],
3186 kwargs={ "delta": 5 } )
3187 addedPValues.append( pCounterValue )
3188 pCounterValue += 5
3189 threads.append( t )
3190 t.start()
3191
3192 for t in threads:
3193 t.join()
3194 pCounters.append( t.result )
3195 # Check that counter incremented numController times
3196 pCounterResults = True
3197 for i in addedPValues:
3198 tmpResult = i in pCounters
3199 pCounterResults = pCounterResults and tmpResult
3200 if not tmpResult:
3201 main.log.error( str( i ) + " is not in partitioned "
3202 "counter incremented results" )
3203 utilities.assert_equals( expect=True,
3204 actual=pCounterResults,
3205 onpass="Default counter incremented",
3206 onfail="Error incrementing default" +
3207 " counter" )
3208
3209 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003210 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003211 utilities.assert_equals( expect=main.TRUE,
3212 actual=incrementCheck,
3213 onpass="Added counters are correct",
3214 onfail="Added counters are incorrect" )
3215
Jon Hall5cf14d52015-07-16 12:15:19 -07003216 # DISTRIBUTED SETS
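# Pattern for the set steps below: the local Python set onosSet mirrors
# the expected contents of the distributed set; after every mutating
# operation each active node reads the set and its size, and the
# responses are compared against this local copy.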
3217 main.step( "Distributed Set get" )
3218 size = len( onosSet )
3219 getResponses = []
3220 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003221 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003222 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003223 name="setTestGet-" + str( i ),
3224 args=[ onosSetName ] )
3225 threads.append( t )
3226 t.start()
3227 for t in threads:
3228 t.join()
3229 getResponses.append( t.result )
3230
3231 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003232 for i in range( len( main.activeNodes ) ):
3233 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003234 if isinstance( getResponses[ i ], list):
3235 current = set( getResponses[ i ] )
3236 if len( current ) == len( getResponses[ i ] ):
3237 # no repeats
3238 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003239 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003240 " has incorrect view" +
3241 " of set " + onosSetName + ":\n" +
3242 str( getResponses[ i ] ) )
3243 main.log.debug( "Expected: " + str( onosSet ) )
3244 main.log.debug( "Actual: " + str( current ) )
3245 getResults = main.FALSE
3246 else:
3247 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003248 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003249 " has repeat elements in" +
3250 " set " + onosSetName + ":\n" +
3251 str( getResponses[ i ] ) )
3252 getResults = main.FALSE
3253 elif getResponses[ i ] == main.ERROR:
3254 getResults = main.FALSE
3255 utilities.assert_equals( expect=main.TRUE,
3256 actual=getResults,
3257 onpass="Set elements are correct",
3258 onfail="Set elements are incorrect" )
3259
3260 main.step( "Distributed Set size" )
3261 sizeResponses = []
3262 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003263 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003264 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003265 name="setTestSize-" + str( i ),
3266 args=[ onosSetName ] )
3267 threads.append( t )
3268 t.start()
3269 for t in threads:
3270 t.join()
3271 sizeResponses.append( t.result )
3272
3273 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003274 for i in range( len( main.activeNodes ) ):
3275 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003276 if size != sizeResponses[ i ]:
3277 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003278 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003279 " expected a size of " + str( size ) +
3280 " for set " + onosSetName +
3281 " but got " + str( sizeResponses[ i ] ) )
3282 utilities.assert_equals( expect=main.TRUE,
3283 actual=sizeResults,
3284 onpass="Set sizes are correct",
3285 onfail="Set sizes are incorrect" )
3286
3287 main.step( "Distributed Set add()" )
3288 onosSet.add( addValue )
3289 addResponses = []
3290 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003291 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003292 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003293 name="setTestAdd-" + str( i ),
3294 args=[ onosSetName, addValue ] )
3295 threads.append( t )
3296 t.start()
3297 for t in threads:
3298 t.join()
3299 addResponses.append( t.result )
3300
3301 # main.TRUE = successfully changed the set
3302 # main.FALSE = action resulted in no change in set
3303 # main.ERROR - Some error in executing the function
3304 addResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003305 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003306 if addResponses[ i ] == main.TRUE:
3307 # All is well
3308 pass
3309 elif addResponses[ i ] == main.FALSE:
3310 # Already in set, probably fine
3311 pass
3312 elif addResponses[ i ] == main.ERROR:
3313 # Error in execution
3314 addResults = main.FALSE
3315 else:
3316 # unexpected result
3317 addResults = main.FALSE
3318 if addResults != main.TRUE:
3319 main.log.error( "Error executing set add" )
3320
3321 # Check if set is still correct
3322 size = len( onosSet )
3323 getResponses = []
3324 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003325 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003326 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003327 name="setTestGet-" + str( i ),
3328 args=[ onosSetName ] )
3329 threads.append( t )
3330 t.start()
3331 for t in threads:
3332 t.join()
3333 getResponses.append( t.result )
3334 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003335 for i in range( len( main.activeNodes ) ):
3336 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003337 if isinstance( getResponses[ i ], list):
3338 current = set( getResponses[ i ] )
3339 if len( current ) == len( getResponses[ i ] ):
3340 # no repeats
3341 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003342 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003343 " of set " + onosSetName + ":\n" +
3344 str( getResponses[ i ] ) )
3345 main.log.debug( "Expected: " + str( onosSet ) )
3346 main.log.debug( "Actual: " + str( current ) )
3347 getResults = main.FALSE
3348 else:
3349 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003350 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003351 " set " + onosSetName + ":\n" +
3352 str( getResponses[ i ] ) )
3353 getResults = main.FALSE
3354 elif getResponses[ i ] == main.ERROR:
3355 getResults = main.FALSE
3356 sizeResponses = []
3357 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003358 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003359 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003360 name="setTestSize-" + str( i ),
3361 args=[ onosSetName ] )
3362 threads.append( t )
3363 t.start()
3364 for t in threads:
3365 t.join()
3366 sizeResponses.append( t.result )
3367 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003368 for i in range( len( main.activeNodes ) ):
3369 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003370 if size != sizeResponses[ i ]:
3371 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003372 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003373 " expected a size of " + str( size ) +
3374 " for set " + onosSetName +
3375 " but got " + str( sizeResponses[ i ] ) )
3376 addResults = addResults and getResults and sizeResults
3377 utilities.assert_equals( expect=main.TRUE,
3378 actual=addResults,
3379 onpass="Set add correct",
3380 onfail="Set add was incorrect" )
3381
3382 main.step( "Distributed Set addAll()" )
3383 onosSet.update( addAllValue.split() )
3384 addResponses = []
3385 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003386 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003387 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003388 name="setTestAddAll-" + str( i ),
3389 args=[ onosSetName, addAllValue ] )
3390 threads.append( t )
3391 t.start()
3392 for t in threads:
3393 t.join()
3394 addResponses.append( t.result )
3395
3396 # main.TRUE = successfully changed the set
3397 # main.FALSE = action resulted in no change in set
3398 # main.ERROR - Some error in executing the function
3399 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003400 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003401 if addResponses[ i ] == main.TRUE:
3402 # All is well
3403 pass
3404 elif addResponses[ i ] == main.FALSE:
3405 # Already in set, probably fine
3406 pass
3407 elif addResponses[ i ] == main.ERROR:
3408 # Error in execution
3409 addAllResults = main.FALSE
3410 else:
3411 # unexpected result
3412 addAllResults = main.FALSE
3413 if addAllResults != main.TRUE:
3414 main.log.error( "Error executing set addAll" )
3415
3416 # Check if set is still correct
3417 size = len( onosSet )
3418 getResponses = []
3419 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003420 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003421 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003422 name="setTestGet-" + str( i ),
3423 args=[ onosSetName ] )
3424 threads.append( t )
3425 t.start()
3426 for t in threads:
3427 t.join()
3428 getResponses.append( t.result )
3429 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003430 for i in range( len( main.activeNodes ) ):
3431 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003432 if isinstance( getResponses[ i ], list):
3433 current = set( getResponses[ i ] )
3434 if len( current ) == len( getResponses[ i ] ):
3435 # no repeats
3436 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003437 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003438 " has incorrect view" +
3439 " of set " + onosSetName + ":\n" +
3440 str( getResponses[ i ] ) )
3441 main.log.debug( "Expected: " + str( onosSet ) )
3442 main.log.debug( "Actual: " + str( current ) )
3443 getResults = main.FALSE
3444 else:
3445 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003446 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003447 " has repeat elements in" +
3448 " set " + onosSetName + ":\n" +
3449 str( getResponses[ i ] ) )
3450 getResults = main.FALSE
3451 elif getResponses[ i ] == main.ERROR:
3452 getResults = main.FALSE
3453 sizeResponses = []
3454 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003455 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003456 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003457 name="setTestSize-" + str( i ),
3458 args=[ onosSetName ] )
3459 threads.append( t )
3460 t.start()
3461 for t in threads:
3462 t.join()
3463 sizeResponses.append( t.result )
3464 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003465 for i in range( len( main.activeNodes ) ):
3466 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003467 if size != sizeResponses[ i ]:
3468 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003469 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003470 " expected a size of " + str( size ) +
3471 " for set " + onosSetName +
3472 " but got " + str( sizeResponses[ i ] ) )
3473 addAllResults = addAllResults and getResults and sizeResults
3474 utilities.assert_equals( expect=main.TRUE,
3475 actual=addAllResults,
3476 onpass="Set addAll correct",
3477 onfail="Set addAll was incorrect" )
3478
3479 main.step( "Distributed Set contains()" )
3480 containsResponses = []
3481 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003482 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003483 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003484 name="setContains-" + str( i ),
3485 args=[ onosSetName ],
3486 kwargs={ "values": addValue } )
3487 threads.append( t )
3488 t.start()
3489 for t in threads:
3490 t.join()
3491 # NOTE: This is the tuple
3492 containsResponses.append( t.result )
3493
3494 containsResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003495 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003496 if containsResponses[ i ] == main.ERROR:
3497 containsResults = main.FALSE
3498 else:
3499 containsResults = containsResults and\
3500 containsResponses[ i ][ 1 ]
3501 utilities.assert_equals( expect=main.TRUE,
3502 actual=containsResults,
3503 onpass="Set contains is functional",
3504 onfail="Set contains failed" )
3505
3506 main.step( "Distributed Set containsAll()" )
3507 containsAllResponses = []
3508 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003509 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003510 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003511 name="setContainsAll-" + str( i ),
3512 args=[ onosSetName ],
3513 kwargs={ "values": addAllValue } )
3514 threads.append( t )
3515 t.start()
3516 for t in threads:
3517 t.join()
3518 # NOTE: This is the tuple
3519 containsAllResponses.append( t.result )
3520
3521 containsAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003522 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003523 if containsAllResponses[ i ] == main.ERROR:
3524 containsAllResults = main.FALSE
3525 else:
3526 containsAllResults = containsAllResults and\
3527 containsAllResponses[ i ][ 1 ]
3528 utilities.assert_equals( expect=main.TRUE,
3529 actual=containsAllResults,
3530 onpass="Set containsAll is functional",
3531 onfail="Set containsAll failed" )
3532
3533 main.step( "Distributed Set remove()" )
3534 onosSet.remove( addValue )
3535 removeResponses = []
3536 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003537 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003538 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003539 name="setTestRemove-" + str( i ),
3540 args=[ onosSetName, addValue ] )
3541 threads.append( t )
3542 t.start()
3543 for t in threads:
3544 t.join()
3545 removeResponses.append( t.result )
3546
3547 # main.TRUE = successfully changed the set
3548 # main.FALSE = action resulted in no change in set
3549 # main.ERROR - Some error in executing the function
3550 removeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003551 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003552 if removeResponses[ i ] == main.TRUE:
3553 # All is well
3554 pass
3555 elif removeResponses[ i ] == main.FALSE:
3556 # not in set, probably fine
3557 pass
3558 elif removeResponses[ i ] == main.ERROR:
3559 # Error in execution
3560 removeResults = main.FALSE
3561 else:
3562 # unexpected result
3563 removeResults = main.FALSE
3564 if removeResults != main.TRUE:
3565 main.log.error( "Error executing set remove" )
3566
3567 # Check if set is still correct
3568 size = len( onosSet )
3569 getResponses = []
3570 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003571 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003572 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003573 name="setTestGet-" + str( i ),
3574 args=[ onosSetName ] )
3575 threads.append( t )
3576 t.start()
3577 for t in threads:
3578 t.join()
3579 getResponses.append( t.result )
3580 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003581 for i in range( len( main.activeNodes ) ):
3582 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003583 if isinstance( getResponses[ i ], list):
3584 current = set( getResponses[ i ] )
3585 if len( current ) == len( getResponses[ i ] ):
3586 # no repeats
3587 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003588 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003589 " has incorrect view" +
3590 " of set " + onosSetName + ":\n" +
3591 str( getResponses[ i ] ) )
3592 main.log.debug( "Expected: " + str( onosSet ) )
3593 main.log.debug( "Actual: " + str( current ) )
3594 getResults = main.FALSE
3595 else:
3596 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003597 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003598 " has repeat elements in" +
3599 " set " + onosSetName + ":\n" +
3600 str( getResponses[ i ] ) )
3601 getResults = main.FALSE
3602 elif getResponses[ i ] == main.ERROR:
3603 getResults = main.FALSE
3604 sizeResponses = []
3605 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003606 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003607 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003608 name="setTestSize-" + str( i ),
3609 args=[ onosSetName ] )
3610 threads.append( t )
3611 t.start()
3612 for t in threads:
3613 t.join()
3614 sizeResponses.append( t.result )
3615 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003616 for i in range( len( main.activeNodes ) ):
3617 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003618 if size != sizeResponses[ i ]:
3619 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003620 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003621 " expected a size of " + str( size ) +
3622 " for set " + onosSetName +
3623 " but got " + str( sizeResponses[ i ] ) )
3624 removeResults = removeResults and getResults and sizeResults
3625 utilities.assert_equals( expect=main.TRUE,
3626 actual=removeResults,
3627 onpass="Set remove correct",
3628 onfail="Set remove was incorrect" )
3629
3630 main.step( "Distributed Set removeAll()" )
3631 onosSet.difference_update( addAllValue.split() )
3632 removeAllResponses = []
3633 threads = []
3634 try:
Jon Halla440e872016-03-31 15:15:50 -07003635 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003636 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003637 name="setTestRemoveAll-" + str( i ),
3638 args=[ onosSetName, addAllValue ] )
3639 threads.append( t )
3640 t.start()
3641 for t in threads:
3642 t.join()
3643 removeAllResponses.append( t.result )
3644 except Exception, e:
3645 main.log.exception(e)
3646
3647 # main.TRUE = successfully changed the set
3648 # main.FALSE = action resulted in no change in set
3649 # main.ERROR - Some error in executing the function
3650 removeAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003651 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003652 if removeAllResponses[ i ] == main.TRUE:
3653 # All is well
3654 pass
3655 elif removeAllResponses[ i ] == main.FALSE:
3656 # not in set, probably fine
3657 pass
3658 elif removeAllResponses[ i ] == main.ERROR:
3659 # Error in execution
3660 removeAllResults = main.FALSE
3661 else:
3662 # unexpected result
3663 removeAllResults = main.FALSE
3664 if removeAllResults != main.TRUE:
3665 main.log.error( "Error executing set removeAll" )
3666
3667 # Check if set is still correct
3668 size = len( onosSet )
3669 getResponses = []
3670 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003671 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003672 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003673 name="setTestGet-" + str( i ),
3674 args=[ onosSetName ] )
3675 threads.append( t )
3676 t.start()
3677 for t in threads:
3678 t.join()
3679 getResponses.append( t.result )
3680 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003681 for i in range( len( main.activeNodes ) ):
3682 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003683 if isinstance( getResponses[ i ], list):
3684 current = set( getResponses[ i ] )
3685 if len( current ) == len( getResponses[ i ] ):
3686 # no repeats
3687 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003688 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003689 " has incorrect view" +
3690 " of set " + onosSetName + ":\n" +
3691 str( getResponses[ i ] ) )
3692 main.log.debug( "Expected: " + str( onosSet ) )
3693 main.log.debug( "Actual: " + str( current ) )
3694 getResults = main.FALSE
3695 else:
3696 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003697 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003698 " has repeat elements in" +
3699 " set " + onosSetName + ":\n" +
3700 str( getResponses[ i ] ) )
3701 getResults = main.FALSE
3702 elif getResponses[ i ] == main.ERROR:
3703 getResults = main.FALSE
3704 sizeResponses = []
3705 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003706 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003707 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003708 name="setTestSize-" + str( i ),
3709 args=[ onosSetName ] )
3710 threads.append( t )
3711 t.start()
3712 for t in threads:
3713 t.join()
3714 sizeResponses.append( t.result )
3715 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003716 for i in range( len( main.activeNodes ) ):
3717 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003718 if size != sizeResponses[ i ]:
3719 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003720 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003721 " expected a size of " + str( size ) +
3722 " for set " + onosSetName +
3723 " but got " + str( sizeResponses[ i ] ) )
3724 removeAllResults = removeAllResults and getResults and sizeResults
3725 utilities.assert_equals( expect=main.TRUE,
3726 actual=removeAllResults,
3727 onpass="Set removeAll correct",
3728 onfail="Set removeAll was incorrect" )
3729
3730 main.step( "Distributed Set addAll()" )
3731 onosSet.update( addAllValue.split() )
3732 addResponses = []
3733 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003734 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003735 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003736 name="setTestAddAll-" + str( i ),
3737 args=[ onosSetName, addAllValue ] )
3738 threads.append( t )
3739 t.start()
3740 for t in threads:
3741 t.join()
3742 addResponses.append( t.result )
3743
3744 # main.TRUE = successfully changed the set
3745 # main.FALSE = action resulted in no change in set
3746 # main.ERROR - Some error in executing the function
3747 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003748 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003749 if addResponses[ i ] == main.TRUE:
3750 # All is well
3751 pass
3752 elif addResponses[ i ] == main.FALSE:
3753 # Already in set, probably fine
3754 pass
3755 elif addResponses[ i ] == main.ERROR:
3756 # Error in execution
3757 addAllResults = main.FALSE
3758 else:
3759 # unexpected result
3760 addAllResults = main.FALSE
3761 if addAllResults != main.TRUE:
3762 main.log.error( "Error executing set addAll" )
3763
3764 # Check if set is still correct
3765 size = len( onosSet )
3766 getResponses = []
3767 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003768 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003769 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003770 name="setTestGet-" + str( i ),
3771 args=[ onosSetName ] )
3772 threads.append( t )
3773 t.start()
3774 for t in threads:
3775 t.join()
3776 getResponses.append( t.result )
3777 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003778 for i in range( len( main.activeNodes ) ):
3779 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003780 if isinstance( getResponses[ i ], list):
3781 current = set( getResponses[ i ] )
3782 if len( current ) == len( getResponses[ i ] ):
3783 # no repeats
3784 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003785 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003786 " has incorrect view" +
3787 " of set " + onosSetName + ":\n" +
3788 str( getResponses[ i ] ) )
3789 main.log.debug( "Expected: " + str( onosSet ) )
3790 main.log.debug( "Actual: " + str( current ) )
3791 getResults = main.FALSE
3792 else:
3793 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003794 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003795 " has repeat elements in" +
3796 " set " + onosSetName + ":\n" +
3797 str( getResponses[ i ] ) )
3798 getResults = main.FALSE
3799 elif getResponses[ i ] == main.ERROR:
3800 getResults = main.FALSE
3801 sizeResponses = []
3802 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003803 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003804 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003805 name="setTestSize-" + str( i ),
3806 args=[ onosSetName ] )
3807 threads.append( t )
3808 t.start()
3809 for t in threads:
3810 t.join()
3811 sizeResponses.append( t.result )
3812 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003813 for i in range( len( main.activeNodes ) ):
3814 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003815 if size != sizeResponses[ i ]:
3816 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003817 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003818 " expected a size of " + str( size ) +
3819 " for set " + onosSetName +
3820 " but got " + str( sizeResponses[ i ] ) )
3821 addAllResults = addAllResults and getResults and sizeResults
3822 utilities.assert_equals( expect=main.TRUE,
3823 actual=addAllResults,
3824 onpass="Set addAll correct",
3825 onfail="Set addAll was incorrect" )
3826
3827 main.step( "Distributed Set clear()" )
3828 onosSet.clear()
3829 clearResponses = []
3830 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003831 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003832 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003833 name="setTestClear-" + str( i ),
3834 args=[ onosSetName, " " ], # Value doesn't matter
3835 kwargs={ "clear": True } )
3836 threads.append( t )
3837 t.start()
3838 for t in threads:
3839 t.join()
3840 clearResponses.append( t.result )
3841
3842 # main.TRUE = successfully changed the set
3843 # main.FALSE = action resulted in no change in set
3844 # main.ERROR - Some error in executing the function
3845 clearResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003846 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003847 if clearResponses[ i ] == main.TRUE:
3848 # All is well
3849 pass
3850 elif clearResponses[ i ] == main.FALSE:
3851 # Nothing set, probably fine
3852 pass
3853 elif clearResponses[ i ] == main.ERROR:
3854 # Error in execution
3855 clearResults = main.FALSE
3856 else:
3857 # unexpected result
3858 clearResults = main.FALSE
3859 if clearResults != main.TRUE:
3860 main.log.error( "Error executing set clear" )
3861
3862 # Check if set is still correct
3863 size = len( onosSet )
3864 getResponses = []
3865 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003866 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003867 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003868 name="setTestGet-" + str( i ),
3869 args=[ onosSetName ] )
3870 threads.append( t )
3871 t.start()
3872 for t in threads:
3873 t.join()
3874 getResponses.append( t.result )
3875 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003876 for i in range( len( main.activeNodes ) ):
3877 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003878 if isinstance( getResponses[ i ], list):
3879 current = set( getResponses[ i ] )
3880 if len( current ) == len( getResponses[ i ] ):
3881 # no repeats
3882 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003883 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003884 " has incorrect view" +
3885 " of set " + onosSetName + ":\n" +
3886 str( getResponses[ i ] ) )
3887 main.log.debug( "Expected: " + str( onosSet ) )
3888 main.log.debug( "Actual: " + str( current ) )
3889 getResults = main.FALSE
3890 else:
3891 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003892 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003893 " has repeat elements in" +
3894 " set " + onosSetName + ":\n" +
3895 str( getResponses[ i ] ) )
3896 getResults = main.FALSE
3897 elif getResponses[ i ] == main.ERROR:
3898 getResults = main.FALSE
3899 sizeResponses = []
3900 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003901 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003902 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003903 name="setTestSize-" + str( i ),
3904 args=[ onosSetName ] )
3905 threads.append( t )
3906 t.start()
3907 for t in threads:
3908 t.join()
3909 sizeResponses.append( t.result )
3910 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003911 for i in range( len( main.activeNodes ) ):
3912 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003913 if size != sizeResponses[ i ]:
3914 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003915 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003916 " expected a size of " + str( size ) +
3917 " for set " + onosSetName +
3918 " but got " + str( sizeResponses[ i ] ) )
3919 clearResults = clearResults and getResults and sizeResults
3920 utilities.assert_equals( expect=main.TRUE,
3921 actual=clearResults,
3922 onpass="Set clear correct",
3923 onfail="Set clear was incorrect" )
3924
3925 main.step( "Distributed Set addAll()" )
3926 onosSet.update( addAllValue.split() )
3927 addResponses = []
3928 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003929 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003930 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003931 name="setTestAddAll-" + str( i ),
3932 args=[ onosSetName, addAllValue ] )
3933 threads.append( t )
3934 t.start()
3935 for t in threads:
3936 t.join()
3937 addResponses.append( t.result )
3938
3939 # main.TRUE = successfully changed the set
3940 # main.FALSE = action resulted in no change in set
3941 # main.ERROR - Some error in executing the function
3942 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003943 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003944 if addResponses[ i ] == main.TRUE:
3945 # All is well
3946 pass
3947 elif addResponses[ i ] == main.FALSE:
3948 # Already in set, probably fine
3949 pass
3950 elif addResponses[ i ] == main.ERROR:
3951 # Error in execution
3952 addAllResults = main.FALSE
3953 else:
3954 # unexpected result
3955 addAllResults = main.FALSE
3956 if addAllResults != main.TRUE:
3957 main.log.error( "Error executing set addAll" )
3958
3959 # Check if set is still correct
3960 size = len( onosSet )
3961 getResponses = []
3962 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003963 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003964 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003965 name="setTestGet-" + str( i ),
3966 args=[ onosSetName ] )
3967 threads.append( t )
3968 t.start()
3969 for t in threads:
3970 t.join()
3971 getResponses.append( t.result )
3972 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003973 for i in range( len( main.activeNodes ) ):
3974 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003975 if isinstance( getResponses[ i ], list):
3976 current = set( getResponses[ i ] )
3977 if len( current ) == len( getResponses[ i ] ):
3978 # no repeats
3979 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003980 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003981 " has incorrect view" +
3982 " of set " + onosSetName + ":\n" +
3983 str( getResponses[ i ] ) )
3984 main.log.debug( "Expected: " + str( onosSet ) )
3985 main.log.debug( "Actual: " + str( current ) )
3986 getResults = main.FALSE
3987 else:
3988 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003989 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003990 " has repeat elements in" +
3991 " set " + onosSetName + ":\n" +
3992 str( getResponses[ i ] ) )
3993 getResults = main.FALSE
3994 elif getResponses[ i ] == main.ERROR:
3995 getResults = main.FALSE
3996 sizeResponses = []
3997 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003998 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003999 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004000 name="setTestSize-" + str( i ),
4001 args=[ onosSetName ] )
4002 threads.append( t )
4003 t.start()
4004 for t in threads:
4005 t.join()
4006 sizeResponses.append( t.result )
4007 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004008 for i in range( len( main.activeNodes ) ):
4009 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004010 if size != sizeResponses[ i ]:
4011 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07004012 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004013 " expected a size of " + str( size ) +
4014 " for set " + onosSetName +
4015 " but got " + str( sizeResponses[ i ] ) )
4016 addAllResults = addAllResults and getResults and sizeResults
4017 utilities.assert_equals( expect=main.TRUE,
4018 actual=addAllResults,
4019 onpass="Set addAll correct",
4020 onfail="Set addAll was incorrect" )
4021
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # No change: the set already contained only the retained
                # elements, which is an acceptable result
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

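        # Verification follows the same pattern as the earlier set steps: read
        # the set back from every active node and compare both the contents
        # and the reported size against the locally maintained Python set, so
        # a stale or divergent replica on any node is caught.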
        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

        # Transactional maps
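        # One active node loads numKeys entries through the CLI driver's
        # transactionalMapPut helper, and the responses are checked on the
        # assumption that the helper returns a dict of per-key results, each
        # carrying a 'value' field. Every active node then reads the keys
        # back in the following step.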
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

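        # The get step queries every key from every active node in parallel
        # and requires each response to equal tMapValue.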
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

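        # Key names follow the "Key" + n convention used in the get calls
        # below; this assumes the put helper stored entries under the same
        # names.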
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map get values were incorrect" )
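        # A minimal sketch of the fan-out pattern repeated throughout this
        # case: run one CLI driver call on every active node in parallel and
        # collect the per-node results. The helper is illustrative only and
        # is not called by the test; the method name is looked up on each
        # per-node CLI driver with getattr.
        def fanOutCliCall( methodName, threadName, args=None, kwargs=None ):
            results = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=getattr( main.CLIs[ i ], methodName ),
                                 name=threadName + "-" + str( i ),
                                 args=args if args is not None else [],
                                 kwargs=kwargs if kwargs is not None else {} )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                results.append( t.result )
            return results
        # Example (not executed): fanOutCliCall( "setTestGet", "setTestGet",
        #                                        args=[ onosSetName ] )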