blob: cb6929acdd73ad4f625f698589fd2597a5dfa21b [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if the HA test setup is
3 working correctly. There are no failures so this test should
4 have a 100% pass rate
5
6List of test cases:
7CASE1: Compile ONOS and push it to the test machines
8CASE2: Assign devices to controllers
9CASE21: Assign mastership to controllers
10CASE3: Assign intents
11CASE4: Ping across added host intents
12CASE5: Reading state of ONOS
13CASE6: The Failure case. Since this is the Sanity test, we do nothing.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAsanity:
29
    def __init__( self ):
        """Initialize the test class.

        `self.default` is an empty string; presumably required by the
        TestON framework's test-case loader — confirm against other tests.
        """
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hallf3d16e72015-12-16 17:45:08 -080052 import time
Jon Halla440e872016-03-31 15:15:50 -070053 import json
Jon Hall5cf14d52015-07-16 12:15:19 -070054 main.log.info( "ONOS HA Sanity test - initialization" )
55 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070056 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070057 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
Jon Hall5cf14d52015-07-16 12:15:19 -070059
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
Jon Halle1a3b752015-07-22 13:02:46 -070067 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070068 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070069 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -070071 # TODO: refactor how to get onos port, maybe put into component tag?
Jon Halle1a3b752015-07-22 13:02:46 -070072 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070080 # These are for csv plotting in jenkins
81 global labels
82 global data
83 labels = []
84 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -070085
86 # FIXME: just get controller port from params?
87 # TODO: do we really need all these?
88 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
89 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
90 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
91 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
92 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
93 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
94 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
95
Jon Halle1a3b752015-07-22 13:02:46 -070096 try:
Jon Hall53c5e662016-04-13 16:06:56 -070097 from tests.HA.dependencies.HA import HA
Jon Hall41d39f12016-04-11 22:54:35 -070098 main.HA = HA()
Jon Halle1a3b752015-07-22 13:02:46 -070099 except Exception as e:
100 main.log.exception( e )
101 main.cleanup()
102 main.exit()
103
104 main.CLIs = []
105 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700106 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700107 for i in range( 1, main.numCtrls + 1 ):
108 try:
109 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
110 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
111 ipList.append( main.nodes[ -1 ].ip_address )
112 except AttributeError:
113 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700114
115 main.step( "Create cell file" )
116 cellAppString = main.params[ 'ENV' ][ 'appString' ]
117 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
118 main.Mininet1.ip_address,
119 cellAppString, ipList )
120 main.step( "Applying cell variable to environment" )
121 cellResult = main.ONOSbench.setCell( cellName )
122 verifyResult = main.ONOSbench.verifyCell()
123
124 # FIXME:this is short term fix
125 main.log.info( "Removing raft logs" )
126 main.ONOSbench.onosRemoveRaftLogs()
127
128 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700129 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700130 main.ONOSbench.onosUninstall( node.ip_address )
131
132 # Make sure ONOS is DEAD
133 main.log.info( "Killing any ONOS processes" )
134 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700135 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700136 killed = main.ONOSbench.onosKill( node.ip_address )
137 killResults = killResults and killed
138
139 cleanInstallResult = main.TRUE
140 gitPullResult = main.TRUE
141
142 main.step( "Starting Mininet" )
143 # scp topo file to mininet
144 # TODO: move to params?
145 topoName = "obelisk.py"
146 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700147 main.ONOSbench.scp( main.Mininet1,
148 filePath + topoName,
149 main.Mininet1.home,
150 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700151 mnResult = main.Mininet1.startNet( )
152 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
153 onpass="Mininet Started",
154 onfail="Error starting Mininet" )
155
156 main.step( "Git checkout and pull " + gitBranch )
157 if PULLCODE:
158 main.ONOSbench.gitCheckout( gitBranch )
159 gitPullResult = main.ONOSbench.gitPull()
160 # values of 1 or 3 are good
161 utilities.assert_lesser( expect=0, actual=gitPullResult,
162 onpass="Git pull successful",
163 onfail="Git pull failed" )
164 main.ONOSbench.getVersion( report=True )
165
166 main.step( "Using mvn clean install" )
167 cleanInstallResult = main.TRUE
168 if PULLCODE and gitPullResult == main.TRUE:
169 cleanInstallResult = main.ONOSbench.cleanInstall()
170 else:
171 main.log.warn( "Did not pull new code so skipping mvn " +
172 "clean install" )
173 utilities.assert_equals( expect=main.TRUE,
174 actual=cleanInstallResult,
175 onpass="MCI successful",
176 onfail="MCI failed" )
177 # GRAPHS
178 # NOTE: important params here:
179 # job = name of Jenkins job
180 # Plot Name = Plot-HA, only can be used if multiple plots
181 # index = The number of the graph under plot name
182 job = "HAsanity"
183 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700184 index = "2"
Jon Hall5cf14d52015-07-16 12:15:19 -0700185 graphs = '<ac:structured-macro ac:name="html">\n'
186 graphs += '<ac:plain-text-body><![CDATA[\n'
187 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800188 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700189 '&width=500&height=300"' +\
190 'noborder="0" width="500" height="300" scrolling="yes" ' +\
191 'seamless="seamless"></iframe>\n'
192 graphs += ']]></ac:plain-text-body>\n'
193 graphs += '</ac:structured-macro>\n'
194 main.log.wiki(graphs)
195
196 main.step( "Creating ONOS package" )
197 packageResult = main.ONOSbench.onosPackage()
198 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
199 onpass="ONOS package successful",
200 onfail="ONOS package failed" )
201
202 main.step( "Installing ONOS package" )
203 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700204 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700205 tmpResult = main.ONOSbench.onosInstall( options="-f",
206 node=node.ip_address )
207 onosInstallResult = onosInstallResult and tmpResult
208 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
209 onpass="ONOS install successful",
210 onfail="ONOS install failed" )
211
212 main.step( "Checking if ONOS is up yet" )
213 for i in range( 2 ):
214 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700215 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700216 started = main.ONOSbench.isup( node.ip_address )
217 if not started:
Jon Hallc6793552016-01-19 14:18:37 -0800218 main.log.error( node.name + " hasn't started" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700219 onosIsupResult = onosIsupResult and started
220 if onosIsupResult == main.TRUE:
221 break
222 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
223 onpass="ONOS startup successful",
224 onfail="ONOS startup failed" )
225
226 main.log.step( "Starting ONOS CLI sessions" )
227 cliResults = main.TRUE
228 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700229 for i in range( main.numCtrls ):
230 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700231 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700232 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700233 threads.append( t )
234 t.start()
235
236 for t in threads:
237 t.join()
238 cliResults = cliResults and t.result
239 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
240 onpass="ONOS cli startup successful",
241 onfail="ONOS cli startup failed" )
242
Jon Halla440e872016-03-31 15:15:50 -0700243 # Create a list of active nodes for use when some nodes are stopped
244 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
245
Jon Hall5cf14d52015-07-16 12:15:19 -0700246 if main.params[ 'tcpdump' ].lower() == "true":
247 main.step( "Start Packet Capture MN" )
248 main.Mininet2.startTcpdump(
249 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
250 + "-MN.pcap",
251 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
252 port=main.params[ 'MNtcpdump' ][ 'port' ] )
253
Jon Halla440e872016-03-31 15:15:50 -0700254 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700255 nodeResults = utilities.retry( main.HA.nodesCheck,
256 False,
257 args=[main.activeNodes],
258 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700259
Jon Hall41d39f12016-04-11 22:54:35 -0700260 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700261 onpass="Nodes check successful",
262 onfail="Nodes check NOT successful" )
263
264 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700265 for i in main.activeNodes:
266 cli = main.CLIs[i]
Jon Halla440e872016-03-31 15:15:50 -0700267 main.log.debug( "{} components not ACTIVE: \n{}".format(
268 cli.name,
269 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700270 main.log.error( "Failed to start ONOS, stopping test" )
271 main.cleanup()
272 main.exit()
273
Jon Hall172b7ba2016-04-07 18:12:20 -0700274 main.step( "Activate apps defined in the params file" )
275 # get data from the params
276 apps = main.params.get( 'apps' )
277 if apps:
278 apps = apps.split(',')
279 main.log.warn( apps )
280 activateResult = True
281 for app in apps:
282 main.CLIs[ 0 ].app( app, "Activate" )
283 # TODO: check this worked
284 time.sleep( 10 ) # wait for apps to activate
285 for app in apps:
286 state = main.CLIs[ 0 ].appStatus( app )
287 if state == "ACTIVE":
288 activateResult = activeResult and True
289 else:
290 main.log.error( "{} is in {} state".format( app, state ) )
291 activeResult = False
292 utilities.assert_equals( expect=True,
293 actual=activateResult,
294 onpass="Successfully activated apps",
295 onfail="Failed to activate apps" )
296 else:
297 main.log.warn( "No apps were specified to be loaded after startup" )
298
299 main.step( "Set ONOS configurations" )
300 config = main.params.get( 'ONOS_Configuration' )
301 if config:
302 main.log.debug( config )
303 checkResult = main.TRUE
304 for component in config:
305 for setting in config[component]:
306 value = config[component][setting]
307 check = main.CLIs[ 0 ].setCfg( component, setting, value )
308 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
309 checkResult = check and checkResult
310 utilities.assert_equals( expect=main.TRUE,
311 actual=checkResult,
312 onpass="Successfully set config",
313 onfail="Failed to set config" )
314 else:
315 main.log.warn( "No configurations were specified to be changed after startup" )
316
Jon Hall9d2dcad2016-04-08 10:15:20 -0700317 main.step( "App Ids check" )
318 appCheck = main.TRUE
319 threads = []
320 for i in main.activeNodes:
321 t = main.Thread( target=main.CLIs[i].appToIDCheck,
322 name="appToIDCheck-" + str( i ),
323 args=[] )
324 threads.append( t )
325 t.start()
326
327 for t in threads:
328 t.join()
329 appCheck = appCheck and t.result
330 if appCheck != main.TRUE:
331 node = main.activeNodes[0]
332 main.log.warn( main.CLIs[node].apps() )
333 main.log.warn( main.CLIs[node].appIDs() )
334 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
335 onpass="App Ids seem to be correct",
336 onfail="Something is wrong with app Ids" )
337
Jon Hall5cf14d52015-07-16 12:15:19 -0700338 def CASE2( self, main ):
339 """
340 Assign devices to controllers
341 """
342 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700343 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700344 assert main, "main not defined"
345 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700346 assert main.CLIs, "main.CLIs not defined"
347 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700348 assert ONOS1Port, "ONOS1Port not defined"
349 assert ONOS2Port, "ONOS2Port not defined"
350 assert ONOS3Port, "ONOS3Port not defined"
351 assert ONOS4Port, "ONOS4Port not defined"
352 assert ONOS5Port, "ONOS5Port not defined"
353 assert ONOS6Port, "ONOS6Port not defined"
354 assert ONOS7Port, "ONOS7Port not defined"
355
356 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700357 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700358 "and check that an ONOS node becomes the " +\
359 "master of the device."
360 main.step( "Assign switches to controllers" )
361
362 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700363 for i in range( main.numCtrls ):
364 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700365 swList = []
366 for i in range( 1, 29 ):
367 swList.append( "s" + str( i ) )
368 main.Mininet1.assignSwController( sw=swList, ip=ipList )
369
370 mastershipCheck = main.TRUE
371 for i in range( 1, 29 ):
372 response = main.Mininet1.getSwController( "s" + str( i ) )
373 try:
374 main.log.info( str( response ) )
375 except Exception:
376 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700377 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700378 if re.search( "tcp:" + node.ip_address, response ):
379 mastershipCheck = mastershipCheck and main.TRUE
380 else:
381 main.log.error( "Error, node " + node.ip_address + " is " +
382 "not in the list of controllers s" +
383 str( i ) + " is connecting to." )
384 mastershipCheck = main.FALSE
385 utilities.assert_equals(
386 expect=main.TRUE,
387 actual=mastershipCheck,
388 onpass="Switch mastership assigned correctly",
389 onfail="Switches not assigned correctly to controllers" )
390
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins the mastership of each of the 28 obelisk switches to a
        specific ONOS node via the 'device-role' CLI, then verifies the
        assignment took effect.
        """
        import time
        # Guard against running this case before CASE1 initialized globals
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster: `c % main.numCtrls` wraps the target index around.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c  = index of the designated master in main.nodes
                # ip = that node's IP address
                # deviceId looked up from the switch dpid suffix
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    # switches s8..s17 share one master (ONOS5)
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    # switches s18..s27 share one master (ONOS7)
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # NOTE(review): in this branch ip/deviceId keep their
                    # values from the previous iteration before the calls below
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                # AssertionError here is caught by the except clause below
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Verify each (ip, deviceId) pair recorded during assignment
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
512
513 def CASE3( self, main ):
514 """
515 Assign intents
516 """
517 import time
518 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700519 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700520 assert main, "main not defined"
521 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700522 assert main.CLIs, "main.CLIs not defined"
523 assert main.nodes, "main.nodes not defined"
Jon Halla440e872016-03-31 15:15:50 -0700524 try:
525 labels
526 except NameError:
527 main.log.error( "labels not defined, setting to []" )
528 labels = []
529 try:
530 data
531 except NameError:
532 main.log.error( "data not defined, setting to []" )
533 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700534 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700535 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700536 "assign predetermined host-to-host intents." +\
537 " After installation, check that the intent" +\
538 " is distributed to all nodes and the state" +\
539 " is INSTALLED"
540
541 # install onos-app-fwd
542 main.step( "Install reactive forwarding app" )
Jon Halla440e872016-03-31 15:15:50 -0700543 onosCli = main.CLIs[ main.activeNodes[0] ]
544 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700545 utilities.assert_equals( expect=main.TRUE, actual=installResults,
546 onpass="Install fwd successful",
547 onfail="Install fwd failed" )
548
549 main.step( "Check app ids" )
550 appCheck = main.TRUE
551 threads = []
Jon Halla440e872016-03-31 15:15:50 -0700552 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700553 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700554 name="appToIDCheck-" + str( i ),
555 args=[] )
556 threads.append( t )
557 t.start()
558
559 for t in threads:
560 t.join()
561 appCheck = appCheck and t.result
562 if appCheck != main.TRUE:
Jon Halla440e872016-03-31 15:15:50 -0700563 main.log.warn( onosCli.apps() )
564 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700565 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
566 onpass="App Ids seem to be correct",
567 onfail="Something is wrong with app Ids" )
568
569 main.step( "Discovering Hosts( Via pingall for now )" )
570 # FIXME: Once we have a host discovery mechanism, use that instead
571 # REACTIVE FWD test
572 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700573 passMsg = "Reactive Pingall test passed"
574 time1 = time.time()
575 pingResult = main.Mininet1.pingall()
576 time2 = time.time()
577 if not pingResult:
578 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700579 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700580 passMsg += " on the second try"
581 utilities.assert_equals(
582 expect=main.TRUE,
583 actual=pingResult,
584 onpass= passMsg,
585 onfail="Reactive Pingall failed, " +
586 "one or more ping pairs failed" )
587 main.log.info( "Time for pingall: %2f seconds" %
588 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700589 # timeout for fwd flows
590 time.sleep( 11 )
591 # uninstall onos-app-fwd
592 main.step( "Uninstall reactive forwarding app" )
Jon Halla440e872016-03-31 15:15:50 -0700593 node = main.activeNodes[0]
594 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700595 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
596 onpass="Uninstall fwd successful",
597 onfail="Uninstall fwd failed" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700598
599 main.step( "Check app ids" )
600 threads = []
601 appCheck2 = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -0700602 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700603 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700604 name="appToIDCheck-" + str( i ),
605 args=[] )
606 threads.append( t )
607 t.start()
608
609 for t in threads:
610 t.join()
611 appCheck2 = appCheck2 and t.result
612 if appCheck2 != main.TRUE:
Jon Halla440e872016-03-31 15:15:50 -0700613 node = main.activeNodes[0]
614 main.log.warn( main.CLIs[node].apps() )
615 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700616 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
617 onpass="App Ids seem to be correct",
618 onfail="Something is wrong with app Ids" )
619
620 main.step( "Add host intents via cli" )
621 intentIds = []
Jon Hall6e709752016-02-01 13:38:46 -0800622 # TODO: move the host numbers to params
623 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700624 intentAddResult = True
625 hostResult = main.TRUE
626 for i in range( 8, 18 ):
627 main.log.info( "Adding host intent between h" + str( i ) +
628 " and h" + str( i + 10 ) )
629 host1 = "00:00:00:00:00:" + \
630 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
631 host2 = "00:00:00:00:00:" + \
632 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
633 # NOTE: getHost can return None
Jon Halla440e872016-03-31 15:15:50 -0700634 host1Dict = onosCli.getHost( host1 )
635 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700636 host1Id = None
637 host2Id = None
638 if host1Dict and host2Dict:
639 host1Id = host1Dict.get( 'id', None )
640 host2Id = host2Dict.get( 'id', None )
641 if host1Id and host2Id:
Jon Halla440e872016-03-31 15:15:50 -0700642 nodeNum = ( i % len( main.activeNodes ) )
643 node = main.activeNodes[nodeNum]
644 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700645 if tmpId:
646 main.log.info( "Added intent with id: " + tmpId )
647 intentIds.append( tmpId )
648 else:
649 main.log.error( "addHostIntent returned: " +
650 repr( tmpId ) )
651 else:
652 main.log.error( "Error, getHost() failed for h" + str( i ) +
653 " and/or h" + str( i + 10 ) )
Jon Halla440e872016-03-31 15:15:50 -0700654 node = main.activeNodes[0]
655 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700656 main.log.warn( "Hosts output: " )
657 try:
658 main.log.warn( json.dumps( json.loads( hosts ),
659 sort_keys=True,
660 indent=4,
661 separators=( ',', ': ' ) ) )
662 except ( ValueError, TypeError ):
663 main.log.warn( repr( hosts ) )
664 hostResult = main.FALSE
665 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
666 onpass="Found a host id for each host",
667 onfail="Error looking up host ids" )
668
669 intentStart = time.time()
Jon Halla440e872016-03-31 15:15:50 -0700670 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700671 main.log.info( "Submitted intents: " + str( intentIds ) )
672 main.log.info( "Intents in ONOS: " + str( onosIds ) )
673 for intent in intentIds:
674 if intent in onosIds:
675 pass # intent submitted is in onos
676 else:
677 intentAddResult = False
678 if intentAddResult:
679 intentStop = time.time()
680 else:
681 intentStop = None
682 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700683 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700684 intentStates = []
685 installedCheck = True
686 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
687 count = 0
688 try:
689 for intent in json.loads( intents ):
690 state = intent.get( 'state', None )
691 if "INSTALLED" not in state:
692 installedCheck = False
693 intentId = intent.get( 'id', None )
694 intentStates.append( ( intentId, state ) )
695 except ( ValueError, TypeError ):
696 main.log.exception( "Error parsing intents" )
697 # add submitted intents not in the store
698 tmplist = [ i for i, s in intentStates ]
699 missingIntents = False
700 for i in intentIds:
701 if i not in tmplist:
702 intentStates.append( ( i, " - " ) )
703 missingIntents = True
704 intentStates.sort()
705 for i, s in intentStates:
706 count += 1
707 main.log.info( "%-6s%-15s%-15s" %
708 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700709 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700710 try:
711 missing = False
712 if leaders:
713 parsedLeaders = json.loads( leaders )
714 main.log.warn( json.dumps( parsedLeaders,
715 sort_keys=True,
716 indent=4,
717 separators=( ',', ': ' ) ) )
718 # check for all intent partitions
719 topics = []
720 for i in range( 14 ):
721 topics.append( "intent-partition-" + str( i ) )
722 main.log.debug( topics )
723 ONOStopics = [ j['topic'] for j in parsedLeaders ]
724 for topic in topics:
725 if topic not in ONOStopics:
726 main.log.error( "Error: " + topic +
727 " not in leaders" )
728 missing = True
729 else:
730 main.log.error( "leaders() returned None" )
731 except ( ValueError, TypeError ):
732 main.log.exception( "Error parsing leaders" )
733 main.log.error( repr( leaders ) )
734 # Check all nodes
735 if missing:
Jon Halla440e872016-03-31 15:15:50 -0700736 for i in main.activeNodes:
737 response = main.CLIs[i].leaders( jsonFormat=False)
738 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 str( response ) )
740
Jon Halla440e872016-03-31 15:15:50 -0700741 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700742 try:
743 if partitions :
744 parsedPartitions = json.loads( partitions )
745 main.log.warn( json.dumps( parsedPartitions,
746 sort_keys=True,
747 indent=4,
748 separators=( ',', ': ' ) ) )
749 # TODO check for a leader in all paritions
750 # TODO check for consistency among nodes
751 else:
752 main.log.error( "partitions() returned None" )
753 except ( ValueError, TypeError ):
754 main.log.exception( "Error parsing partitions" )
755 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -0700756 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700757 try:
758 if pendingMap :
759 parsedPending = json.loads( pendingMap )
760 main.log.warn( json.dumps( parsedPending,
761 sort_keys=True,
762 indent=4,
763 separators=( ',', ': ' ) ) )
764 # TODO check something here?
765 else:
766 main.log.error( "pendingMap() returned None" )
767 except ( ValueError, TypeError ):
768 main.log.exception( "Error parsing pending map" )
769 main.log.error( repr( pendingMap ) )
770
771 intentAddResult = bool( intentAddResult and not missingIntents and
772 installedCheck )
773 if not intentAddResult:
774 main.log.error( "Error in pushing host intents to ONOS" )
775
776 main.step( "Intent Anti-Entropy dispersion" )
Jon Halla440e872016-03-31 15:15:50 -0700777 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700778 correct = True
779 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700780 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700781 onosIds = []
Jon Halla440e872016-03-31 15:15:50 -0700782 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700783 onosIds.append( ids )
Jon Halla440e872016-03-31 15:15:50 -0700784 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700785 str( sorted( onosIds ) ) )
786 if sorted( ids ) != sorted( intentIds ):
787 main.log.warn( "Set of intent IDs doesn't match" )
788 correct = False
789 break
790 else:
Jon Halla440e872016-03-31 15:15:50 -0700791 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700792 for intent in intents:
793 if intent[ 'state' ] != "INSTALLED":
794 main.log.warn( "Intent " + intent[ 'id' ] +
795 " is " + intent[ 'state' ] )
796 correct = False
797 break
798 if correct:
799 break
800 else:
801 time.sleep(1)
802 if not intentStop:
803 intentStop = time.time()
804 global gossipTime
805 gossipTime = intentStop - intentStart
806 main.log.info( "It took about " + str( gossipTime ) +
807 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700808 gossipPeriod = int( main.params['timers']['gossip'] )
Jon Halla440e872016-03-31 15:15:50 -0700809 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700810 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700811 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700812 onpass="ECM anti-entropy for intents worked within " +
813 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700814 onfail="Intent ECM anti-entropy took too long. " +
815 "Expected time:{}, Actual time:{}".format( maxGossipTime,
816 gossipTime ) )
817 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700818 intentAddResult = True
819
820 if not intentAddResult or "key" in pendingMap:
821 import time
822 installedCheck = True
823 main.log.info( "Sleeping 60 seconds to see if intents are found" )
824 time.sleep( 60 )
Jon Halla440e872016-03-31 15:15:50 -0700825 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700826 main.log.info( "Submitted intents: " + str( intentIds ) )
827 main.log.info( "Intents in ONOS: " + str( onosIds ) )
828 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700829 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700830 intentStates = []
831 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
832 count = 0
833 try:
834 for intent in json.loads( intents ):
835 # Iter through intents of a node
836 state = intent.get( 'state', None )
837 if "INSTALLED" not in state:
838 installedCheck = False
839 intentId = intent.get( 'id', None )
840 intentStates.append( ( intentId, state ) )
841 except ( ValueError, TypeError ):
842 main.log.exception( "Error parsing intents" )
843 # add submitted intents not in the store
844 tmplist = [ i for i, s in intentStates ]
845 for i in intentIds:
846 if i not in tmplist:
847 intentStates.append( ( i, " - " ) )
848 intentStates.sort()
849 for i, s in intentStates:
850 count += 1
851 main.log.info( "%-6s%-15s%-15s" %
852 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700853 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700854 try:
855 missing = False
856 if leaders:
857 parsedLeaders = json.loads( leaders )
858 main.log.warn( json.dumps( parsedLeaders,
859 sort_keys=True,
860 indent=4,
861 separators=( ',', ': ' ) ) )
862 # check for all intent partitions
863 # check for election
864 topics = []
865 for i in range( 14 ):
866 topics.append( "intent-partition-" + str( i ) )
867 # FIXME: this should only be after we start the app
868 topics.append( "org.onosproject.election" )
869 main.log.debug( topics )
870 ONOStopics = [ j['topic'] for j in parsedLeaders ]
871 for topic in topics:
872 if topic not in ONOStopics:
873 main.log.error( "Error: " + topic +
874 " not in leaders" )
875 missing = True
876 else:
877 main.log.error( "leaders() returned None" )
878 except ( ValueError, TypeError ):
879 main.log.exception( "Error parsing leaders" )
880 main.log.error( repr( leaders ) )
881 # Check all nodes
882 if missing:
Jon Halla440e872016-03-31 15:15:50 -0700883 for i in main.activeNodes:
884 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700885 response = node.leaders( jsonFormat=False)
886 main.log.warn( str( node.name ) + " leaders output: \n" +
887 str( response ) )
888
Jon Halla440e872016-03-31 15:15:50 -0700889 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700890 try:
891 if partitions :
892 parsedPartitions = json.loads( partitions )
893 main.log.warn( json.dumps( parsedPartitions,
894 sort_keys=True,
895 indent=4,
896 separators=( ',', ': ' ) ) )
897 # TODO check for a leader in all paritions
898 # TODO check for consistency among nodes
899 else:
900 main.log.error( "partitions() returned None" )
901 except ( ValueError, TypeError ):
902 main.log.exception( "Error parsing partitions" )
903 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -0700904 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700905 try:
906 if pendingMap :
907 parsedPending = json.loads( pendingMap )
908 main.log.warn( json.dumps( parsedPending,
909 sort_keys=True,
910 indent=4,
911 separators=( ',', ': ' ) ) )
912 # TODO check something here?
913 else:
914 main.log.error( "pendingMap() returned None" )
915 except ( ValueError, TypeError ):
916 main.log.exception( "Error parsing pending map" )
917 main.log.error( repr( pendingMap ) )
918
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents added in CASE3:
          1. Poll (up to ~40s) until every intent reports INSTALLED.
          2. Ping each intent's host pair (h8..h17 <-> h18..h27) via Mininet.
          3. Sanity-check the intent-partition leadership topics, partitions
             and the pending map on one ONOS node.
          4. If intents were not all INSTALLED, wait 60 seconds for state to
             settle, dump diagnostics (intents, leaders, partitions, pending
             map, flow rules) and repeat the ping check.

        Relies on TestON-injected globals: ``main`` (components, params,
        logging) and ``utilities`` (assertion helpers).
        """
        import json
        import time
        # Preconditions: the test environment must have been set up by CASE1
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # Use the first active node's CLI for all single-node cluster queries
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll once per second, up to 40 times, until every intent is INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # intents() returned None or non-JSON text
                main.log.exception( "Error parsing intents." )
            # Print states as a sorted (id, state) table
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs follow the intent layout from CASE3: h<i> <-> h<i+10>
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intents for debugging when connectivity is broken
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # Expect one leadership topic per intent partition (0..13)
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Log the raw (non-JSON) leaders view from every active node
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Retry path: intents were not all INSTALLED within the poll window,
        # so give the cluster another minute and re-run the diagnostics/pings
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # A topic has no leader; dump each node's raw leaders view
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1200
1201 def CASE5( self, main ):
1202 """
1203 Reading state of ONOS
1204 """
1205 import json
1206 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001207 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001208 assert main, "main not defined"
1209 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001210 assert main.CLIs, "main.CLIs not defined"
1211 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001212
1213 main.case( "Setting up and gathering data for current state" )
1214 # The general idea for this test case is to pull the state of
1215 # ( intents,flows, topology,... ) from each ONOS node
1216 # We can then compare them with each other and also with past states
1217
1218 main.step( "Check that each switch has a master" )
1219 global mastershipState
1220 mastershipState = '[]'
1221
1222 # Assert that each device has a master
1223 rolesNotNull = main.TRUE
1224 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001225 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001226 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001227 name="rolesNotNull-" + str( i ),
1228 args=[] )
1229 threads.append( t )
1230 t.start()
1231
1232 for t in threads:
1233 t.join()
1234 rolesNotNull = rolesNotNull and t.result
1235 utilities.assert_equals(
1236 expect=main.TRUE,
1237 actual=rolesNotNull,
1238 onpass="Each device has a master",
1239 onfail="Some devices don't have a master assigned" )
1240
1241 main.step( "Get the Mastership of each switch from each controller" )
1242 ONOSMastership = []
1243 mastershipCheck = main.FALSE
1244 consistentMastership = True
1245 rolesResults = True
1246 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001247 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001248 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001249 name="roles-" + str( i ),
1250 args=[] )
1251 threads.append( t )
1252 t.start()
1253
1254 for t in threads:
1255 t.join()
1256 ONOSMastership.append( t.result )
1257
Jon Halla440e872016-03-31 15:15:50 -07001258 for i in range( len( ONOSMastership ) ):
1259 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001260 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001261 main.log.error( "Error in getting ONOS" + node + " roles" )
1262 main.log.warn( "ONOS" + node + " mastership response: " +
1263 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001264 rolesResults = False
1265 utilities.assert_equals(
1266 expect=True,
1267 actual=rolesResults,
1268 onpass="No error in reading roles output",
1269 onfail="Error in reading roles from ONOS" )
1270
1271 main.step( "Check for consistency in roles from each controller" )
1272 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1273 main.log.info(
1274 "Switch roles are consistent across all ONOS nodes" )
1275 else:
1276 consistentMastership = False
1277 utilities.assert_equals(
1278 expect=True,
1279 actual=consistentMastership,
1280 onpass="Switch roles are consistent across all ONOS nodes",
1281 onfail="ONOS nodes have different views of switch roles" )
1282
1283 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001284 for i in range( len( main.activeNodes ) ):
1285 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001286 try:
1287 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001288 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 json.dumps(
1290 json.loads( ONOSMastership[ i ] ),
1291 sort_keys=True,
1292 indent=4,
1293 separators=( ',', ': ' ) ) )
1294 except ( ValueError, TypeError ):
1295 main.log.warn( repr( ONOSMastership[ i ] ) )
1296 elif rolesResults and consistentMastership:
1297 mastershipCheck = main.TRUE
1298 mastershipState = ONOSMastership[ 0 ]
1299
1300 main.step( "Get the intents from each controller" )
1301 global intentState
1302 intentState = []
1303 ONOSIntents = []
1304 intentCheck = main.FALSE
1305 consistentIntents = True
1306 intentsResults = True
1307 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001308 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001309 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001310 name="intents-" + str( i ),
1311 args=[],
1312 kwargs={ 'jsonFormat': True } )
1313 threads.append( t )
1314 t.start()
1315
1316 for t in threads:
1317 t.join()
1318 ONOSIntents.append( t.result )
1319
Jon Halla440e872016-03-31 15:15:50 -07001320 for i in range( len( ONOSIntents ) ):
1321 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001322 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001323 main.log.error( "Error in getting ONOS" + node + " intents" )
1324 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001325 repr( ONOSIntents[ i ] ) )
1326 intentsResults = False
1327 utilities.assert_equals(
1328 expect=True,
1329 actual=intentsResults,
1330 onpass="No error in reading intents output",
1331 onfail="Error in reading intents from ONOS" )
1332
1333 main.step( "Check for consistency in Intents from each controller" )
1334 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1335 main.log.info( "Intents are consistent across all ONOS " +
1336 "nodes" )
1337 else:
1338 consistentIntents = False
1339 main.log.error( "Intents not consistent" )
1340 utilities.assert_equals(
1341 expect=True,
1342 actual=consistentIntents,
1343 onpass="Intents are consistent across all ONOS nodes",
1344 onfail="ONOS nodes have different views of intents" )
1345
1346 if intentsResults:
1347 # Try to make it easy to figure out what is happening
1348 #
1349 # Intent ONOS1 ONOS2 ...
1350 # 0x01 INSTALLED INSTALLING
1351 # ... ... ...
1352 # ... ... ...
1353 title = " Id"
Jon Halla440e872016-03-31 15:15:50 -07001354 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001355 title += " " * 10 + "ONOS" + str( n + 1 )
1356 main.log.warn( title )
Jon Halle1a3b752015-07-22 13:02:46 -07001357 # get all intent keys in the cluster
Jon Hall5cf14d52015-07-16 12:15:19 -07001358 keys = []
1359 try:
1360 # Get the set of all intent keys
1361 for nodeStr in ONOSIntents:
1362 node = json.loads( nodeStr )
1363 for intent in node:
1364 keys.append( intent.get( 'id' ) )
1365 keys = set( keys )
1366 # For each intent key, print the state on each node
1367 for key in keys:
1368 row = "%-13s" % key
1369 for nodeStr in ONOSIntents:
1370 node = json.loads( nodeStr )
1371 for intent in node:
1372 if intent.get( 'id', "Error" ) == key:
1373 row += "%-15s" % intent.get( 'state' )
1374 main.log.warn( row )
1375 # End of intent state table
1376 except ValueError as e:
1377 main.log.exception( e )
1378 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1379
1380 if intentsResults and not consistentIntents:
1381 # print the json objects
Jon Halla440e872016-03-31 15:15:50 -07001382 n = str( main.activeNodes[-1] + 1 )
1383 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001384 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1385 sort_keys=True,
1386 indent=4,
1387 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001388 for i in range( len( ONOSIntents ) ):
1389 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001390 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07001391 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001392 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1393 sort_keys=True,
1394 indent=4,
1395 separators=( ',', ': ' ) ) )
1396 else:
Jon Halla440e872016-03-31 15:15:50 -07001397 main.log.debug( "ONOS" + node + " intents match ONOS" +
1398 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001399 elif intentsResults and consistentIntents:
1400 intentCheck = main.TRUE
1401 intentState = ONOSIntents[ 0 ]
1402
1403 main.step( "Get the flows from each controller" )
1404 global flowState
1405 flowState = []
1406 ONOSFlows = []
1407 ONOSFlowsJson = []
1408 flowCheck = main.FALSE
1409 consistentFlows = True
1410 flowsResults = True
1411 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001412 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001413 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001414 name="flows-" + str( i ),
1415 args=[],
1416 kwargs={ 'jsonFormat': True } )
1417 threads.append( t )
1418 t.start()
1419
1420 # NOTE: Flows command can take some time to run
1421 time.sleep(30)
1422 for t in threads:
1423 t.join()
1424 result = t.result
1425 ONOSFlows.append( result )
1426
Jon Halla440e872016-03-31 15:15:50 -07001427 for i in range( len( ONOSFlows ) ):
1428 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001429 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1430 main.log.error( "Error in getting ONOS" + num + " flows" )
1431 main.log.warn( "ONOS" + num + " flows response: " +
1432 repr( ONOSFlows[ i ] ) )
1433 flowsResults = False
1434 ONOSFlowsJson.append( None )
1435 else:
1436 try:
1437 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1438 except ( ValueError, TypeError ):
1439 # FIXME: change this to log.error?
1440 main.log.exception( "Error in parsing ONOS" + num +
1441 " response as json." )
1442 main.log.error( repr( ONOSFlows[ i ] ) )
1443 ONOSFlowsJson.append( None )
1444 flowsResults = False
1445 utilities.assert_equals(
1446 expect=True,
1447 actual=flowsResults,
1448 onpass="No error in reading flows output",
1449 onfail="Error in reading flows from ONOS" )
1450
1451 main.step( "Check for consistency in Flows from each controller" )
1452 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1453 if all( tmp ):
1454 main.log.info( "Flow count is consistent across all ONOS nodes" )
1455 else:
1456 consistentFlows = False
1457 utilities.assert_equals(
1458 expect=True,
1459 actual=consistentFlows,
1460 onpass="The flow count is consistent across all ONOS nodes",
1461 onfail="ONOS nodes have different flow counts" )
1462
1463 if flowsResults and not consistentFlows:
Jon Halla440e872016-03-31 15:15:50 -07001464 for i in range( len( ONOSFlows ) ):
1465 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001466 try:
1467 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001468 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001469 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1470 indent=4, separators=( ',', ': ' ) ) )
1471 except ( ValueError, TypeError ):
Jon Halla440e872016-03-31 15:15:50 -07001472 main.log.warn( "ONOS" + node + " flows: " +
1473 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001474 elif flowsResults and consistentFlows:
1475 flowCheck = main.TRUE
1476 flowState = ONOSFlows[ 0 ]
1477
1478 main.step( "Get the OF Table entries" )
1479 global flows
1480 flows = []
1481 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001482 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001483 if flowCheck == main.FALSE:
1484 for table in flows:
1485 main.log.warn( table )
1486 # TODO: Compare switch flow tables with ONOS flow tables
1487
1488 main.step( "Start continuous pings" )
1489 main.Mininet2.pingLong(
1490 src=main.params[ 'PING' ][ 'source1' ],
1491 target=main.params[ 'PING' ][ 'target1' ],
1492 pingTime=500 )
1493 main.Mininet2.pingLong(
1494 src=main.params[ 'PING' ][ 'source2' ],
1495 target=main.params[ 'PING' ][ 'target2' ],
1496 pingTime=500 )
1497 main.Mininet2.pingLong(
1498 src=main.params[ 'PING' ][ 'source3' ],
1499 target=main.params[ 'PING' ][ 'target3' ],
1500 pingTime=500 )
1501 main.Mininet2.pingLong(
1502 src=main.params[ 'PING' ][ 'source4' ],
1503 target=main.params[ 'PING' ][ 'target4' ],
1504 pingTime=500 )
1505 main.Mininet2.pingLong(
1506 src=main.params[ 'PING' ][ 'source5' ],
1507 target=main.params[ 'PING' ][ 'target5' ],
1508 pingTime=500 )
1509 main.Mininet2.pingLong(
1510 src=main.params[ 'PING' ][ 'source6' ],
1511 target=main.params[ 'PING' ][ 'target6' ],
1512 pingTime=500 )
1513 main.Mininet2.pingLong(
1514 src=main.params[ 'PING' ][ 'source7' ],
1515 target=main.params[ 'PING' ][ 'target7' ],
1516 pingTime=500 )
1517 main.Mininet2.pingLong(
1518 src=main.params[ 'PING' ][ 'source8' ],
1519 target=main.params[ 'PING' ][ 'target8' ],
1520 pingTime=500 )
1521 main.Mininet2.pingLong(
1522 src=main.params[ 'PING' ][ 'source9' ],
1523 target=main.params[ 'PING' ][ 'target9' ],
1524 pingTime=500 )
1525 main.Mininet2.pingLong(
1526 src=main.params[ 'PING' ][ 'source10' ],
1527 target=main.params[ 'PING' ][ 'target10' ],
1528 pingTime=500 )
1529
1530 main.step( "Collecting topology information from ONOS" )
1531 devices = []
1532 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001533 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001534 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001535 name="devices-" + str( i ),
1536 args=[ ] )
1537 threads.append( t )
1538 t.start()
1539
1540 for t in threads:
1541 t.join()
1542 devices.append( t.result )
1543 hosts = []
1544 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001545 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001546 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001547 name="hosts-" + str( i ),
1548 args=[ ] )
1549 threads.append( t )
1550 t.start()
1551
1552 for t in threads:
1553 t.join()
1554 try:
1555 hosts.append( json.loads( t.result ) )
1556 except ( ValueError, TypeError ):
1557 # FIXME: better handling of this, print which node
1558 # Maybe use thread name?
1559 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001560 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001561 hosts.append( None )
1562
1563 ports = []
1564 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001565 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001566 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001567 name="ports-" + str( i ),
1568 args=[ ] )
1569 threads.append( t )
1570 t.start()
1571
1572 for t in threads:
1573 t.join()
1574 ports.append( t.result )
1575 links = []
1576 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001577 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001578 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001579 name="links-" + str( i ),
1580 args=[ ] )
1581 threads.append( t )
1582 t.start()
1583
1584 for t in threads:
1585 t.join()
1586 links.append( t.result )
1587 clusters = []
1588 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001589 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001590 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001591 name="clusters-" + str( i ),
1592 args=[ ] )
1593 threads.append( t )
1594 t.start()
1595
1596 for t in threads:
1597 t.join()
1598 clusters.append( t.result )
1599 # Compare json objects for hosts and dataplane clusters
1600
1601 # hosts
1602 main.step( "Host view is consistent across ONOS nodes" )
1603 consistentHostsResult = main.TRUE
1604 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001605 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001606 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001607 if hosts[ controller ] == hosts[ 0 ]:
1608 continue
1609 else: # hosts not consistent
1610 main.log.error( "hosts from ONOS" +
1611 controllerStr +
1612 " is inconsistent with ONOS1" )
1613 main.log.warn( repr( hosts[ controller ] ) )
1614 consistentHostsResult = main.FALSE
1615
1616 else:
1617 main.log.error( "Error in getting ONOS hosts from ONOS" +
1618 controllerStr )
1619 consistentHostsResult = main.FALSE
1620 main.log.warn( "ONOS" + controllerStr +
1621 " hosts response: " +
1622 repr( hosts[ controller ] ) )
1623 utilities.assert_equals(
1624 expect=main.TRUE,
1625 actual=consistentHostsResult,
1626 onpass="Hosts view is consistent across all ONOS nodes",
1627 onfail="ONOS nodes have different views of hosts" )
1628
1629 main.step( "Each host has an IP address" )
1630 ipResult = main.TRUE
1631 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001632 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001633 if hosts[ controller ]:
1634 for host in hosts[ controller ]:
1635 if not host.get( 'ipAddresses', [ ] ):
1636 main.log.error( "Error with host ips on controller" +
1637 controllerStr + ": " + str( host ) )
1638 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001639 utilities.assert_equals(
1640 expect=main.TRUE,
1641 actual=ipResult,
1642 onpass="The ips of the hosts aren't empty",
1643 onfail="The ip of at least one host is missing" )
1644
1645 # Strongly connected clusters of devices
1646 main.step( "Cluster view is consistent across ONOS nodes" )
1647 consistentClustersResult = main.TRUE
1648 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07001649 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001650 if "Error" not in clusters[ controller ]:
1651 if clusters[ controller ] == clusters[ 0 ]:
1652 continue
1653 else: # clusters not consistent
1654 main.log.error( "clusters from ONOS" + controllerStr +
1655 " is inconsistent with ONOS1" )
1656 consistentClustersResult = main.FALSE
1657
1658 else:
1659 main.log.error( "Error in getting dataplane clusters " +
1660 "from ONOS" + controllerStr )
1661 consistentClustersResult = main.FALSE
1662 main.log.warn( "ONOS" + controllerStr +
1663 " clusters response: " +
1664 repr( clusters[ controller ] ) )
1665 utilities.assert_equals(
1666 expect=main.TRUE,
1667 actual=consistentClustersResult,
1668 onpass="Clusters view is consistent across all ONOS nodes",
1669 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001670 if consistentClustersResult != main.TRUE:
1671 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07001672 # there should always only be one cluster
1673 main.step( "Cluster view correct across ONOS nodes" )
1674 try:
1675 numClusters = len( json.loads( clusters[ 0 ] ) )
1676 except ( ValueError, TypeError ):
1677 main.log.exception( "Error parsing clusters[0]: " +
1678 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001679 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001680 clusterResults = main.FALSE
1681 if numClusters == 1:
1682 clusterResults = main.TRUE
1683 utilities.assert_equals(
1684 expect=1,
1685 actual=numClusters,
1686 onpass="ONOS shows 1 SCC",
1687 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1688
1689 main.step( "Comparing ONOS topology to MN" )
1690 devicesResults = main.TRUE
1691 linksResults = main.TRUE
1692 hostsResults = main.TRUE
1693 mnSwitches = main.Mininet1.getSwitches()
1694 mnLinks = main.Mininet1.getLinks()
1695 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07001696 for controller in main.activeNodes:
1697 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001698 if devices[ controller ] and ports[ controller ] and\
1699 "Error" not in devices[ controller ] and\
1700 "Error" not in ports[ controller ]:
Jon Halla440e872016-03-31 15:15:50 -07001701 currentDevicesResult = main.Mininet1.compareSwitches(
1702 mnSwitches,
1703 json.loads( devices[ controller ] ),
1704 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001705 else:
1706 currentDevicesResult = main.FALSE
1707 utilities.assert_equals( expect=main.TRUE,
1708 actual=currentDevicesResult,
1709 onpass="ONOS" + controllerStr +
1710 " Switches view is correct",
1711 onfail="ONOS" + controllerStr +
1712 " Switches view is incorrect" )
1713 if links[ controller ] and "Error" not in links[ controller ]:
1714 currentLinksResult = main.Mininet1.compareLinks(
1715 mnSwitches, mnLinks,
1716 json.loads( links[ controller ] ) )
1717 else:
1718 currentLinksResult = main.FALSE
1719 utilities.assert_equals( expect=main.TRUE,
1720 actual=currentLinksResult,
1721 onpass="ONOS" + controllerStr +
1722 " links view is correct",
1723 onfail="ONOS" + controllerStr +
1724 " links view is incorrect" )
1725
Jon Hall657cdf62015-12-17 14:40:51 -08001726 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001727 currentHostsResult = main.Mininet1.compareHosts(
1728 mnHosts,
1729 hosts[ controller ] )
1730 else:
1731 currentHostsResult = main.FALSE
1732 utilities.assert_equals( expect=main.TRUE,
1733 actual=currentHostsResult,
1734 onpass="ONOS" + controllerStr +
1735 " hosts exist in Mininet",
1736 onfail="ONOS" + controllerStr +
1737 " hosts don't match Mininet" )
1738
1739 devicesResults = devicesResults and currentDevicesResult
1740 linksResults = linksResults and currentLinksResult
1741 hostsResults = hostsResults and currentHostsResult
1742
1743 main.step( "Device information is correct" )
1744 utilities.assert_equals(
1745 expect=main.TRUE,
1746 actual=devicesResults,
1747 onpass="Device information is correct",
1748 onfail="Device information is incorrect" )
1749
1750 main.step( "Links are correct" )
1751 utilities.assert_equals(
1752 expect=main.TRUE,
1753 actual=linksResults,
1754 onpass="Link are correct",
1755 onfail="Links are incorrect" )
1756
1757 main.step( "Hosts are correct" )
1758 utilities.assert_equals(
1759 expect=main.TRUE,
1760 actual=hostsResults,
1761 onpass="Hosts are correct",
1762 onfail="Hosts are incorrect" )
1763
1764 def CASE6( self, main ):
1765 """
1766 The Failure case. Since this is the Sanity test, we do nothing.
1767 """
1768 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001769 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001770 assert main, "main not defined"
1771 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001772 assert main.CLIs, "main.CLIs not defined"
1773 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001774 main.case( "Wait 60 seconds instead of inducing a failure" )
1775 time.sleep( 60 )
1776 utilities.assert_equals(
1777 expect=main.TRUE,
1778 actual=main.TRUE,
1779 onpass="Sleeping 60 seconds",
1780 onfail="Something is terribly wrong with my math" )
1781
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Compares the current cluster state against the state recorded
        before CASE6 ran: switch mastership, intents, OF flow tables and
        the leadership-election winner. Reads the bare globals
        mastershipState, intentState and flows, which are presumably saved
        by an earlier case (the in-line NOTE below says case 5) -- TODO
        confirm they are set before this case runs.
        """
        import json
        # Fail fast if the test harness was not fully initialized.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel; each thread's result is a
        # TRUE/FALSE from the node's rolesNotNull check.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            # AND the results together: one node reporting a masterless
            # device fails the whole step.
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # ONOSMastership[i] holds the raw `roles` output of the i-th
        # active node (same order as main.activeNodes).
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # First pass: every node must have returned usable output.
        for i in range( len( ONOSMastership ) ):
            # Map list index back to the 1-based ONOS node number for logs.
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Raw-string comparison against node 0's view; any divergence
        # marks the cluster view inconsistent.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Only dump every node's full roles JSON when the views diverged
        # but the output itself parsed cleanly.
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        description2 = "Compare switch roles from before failure"
        main.step( description2 )
        try:
            currentJson = json.loads( ONOSMastership[0] )
            # mastershipState is a global saved by an earlier case.
            oldJson = json.loads( mastershipState )
        except ( ValueError, TypeError ):
            main.log.exception( "Something is wrong with parsing " +
                                "ONOSMastership[0] or mastershipState" )
            main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
            # NOTE(review): looks like a ": " separator is missing between
            # the label and the repr in this log message -- confirm intent.
            main.log.error( "mastershipState" + repr( mastershipState ) )
            # Unparseable state is unrecoverable for this test run.
            main.cleanup()
            main.exit()
        mastershipCheck = main.TRUE
        # Switches s1..s28: the master of each DPID must be the same
        # before and after the (no-op) failure.
        for i in range( 1, 29 ):
            switchDPID = str(
                main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
            current = [ switch[ 'master' ] for switch in currentJson
                        if switchDPID in switch[ 'id' ] ]
            old = [ switch[ 'master' ] for switch in oldJson
                    if switchDPID in switch[ 'id' ] ]
            if current == old:
                mastershipCheck = mastershipCheck and main.TRUE
            else:
                main.log.warn( "Mastership of switch %s changed" % switchDPID )
                mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Mastership of Switches was not changed",
            onfail="Mastership of some switches changed" )
        mastershipCheck = mastershipCheck and consistentMastership

        main.step( "Get the intents and compare across all nodes" )
        # ONOSIntents[i] holds the raw JSON intents output of the i-th
        # active node.
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # First pass: every node must have returned usable output.
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # sorted() makes the comparison insensitive to ordering of the
        # raw output strings' characters relative to node 0's view.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id, one column per node's state.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (state -> count).
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump each node's full intent JSON to aid debugging.
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # EAFP check that the intentState global actually exists before
        # comparing against it.
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not string-equal: fall back to a
                # per-intent membership check on the parsed JSON.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Show both snapshots so the diff can be inspected in logs.
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before failure",
                onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # flows is a global list of per-switch flow-table dumps saved by
        # an earlier case -- presumably indexed s1..s28; TODO confirm.
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the long-running background pings started earlier.
        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # NOTE: this only works for the sanity test. In case of failures,
        # leader will likely change
        # Expected leader: the first active node's IP address.
        leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            # verify leader is ONOS1
            if leaderN == leader:
                # all is well
                # NOTE: In failure scenario, this could be a new node, maybe
                # check != ONOS1
                pass
            elif leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
        # All nodes must agree on exactly one leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2130
2131 def CASE8( self, main ):
2132 """
2133 Compare topo
2134 """
2135 import json
2136 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002137 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002138 assert main, "main not defined"
2139 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002140 assert main.CLIs, "main.CLIs not defined"
2141 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002142
2143 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002144 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002146 topoResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002147 topoFailMsg = "ONOS topology don't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002148 elapsed = 0
2149 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002150 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002151 startTime = time.time()
2152 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002153 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002154 devicesResults = main.TRUE
2155 linksResults = main.TRUE
2156 hostsResults = main.TRUE
2157 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002158 count += 1
2159 cliStart = time.time()
2160 devices = []
2161 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002162 for i in main.activeNodes:
2163 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002164 name="devices-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002165 args=[ main.CLIs[i].devices, [ None ] ],
2166 kwargs= { 'sleep': 5, 'attempts': 5,
2167 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002168 threads.append( t )
2169 t.start()
2170
2171 for t in threads:
2172 t.join()
2173 devices.append( t.result )
2174 hosts = []
2175 ipResult = main.TRUE
2176 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002177 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002178 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002179 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002180 args=[ main.CLIs[i].hosts, [ None ] ],
2181 kwargs= { 'sleep': 5, 'attempts': 5,
2182 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002183 threads.append( t )
2184 t.start()
2185
2186 for t in threads:
2187 t.join()
2188 try:
2189 hosts.append( json.loads( t.result ) )
2190 except ( ValueError, TypeError ):
2191 main.log.exception( "Error parsing hosts results" )
2192 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002193 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002194 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002195 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002196 if hosts[ controller ]:
2197 for host in hosts[ controller ]:
2198 if host is None or host.get( 'ipAddresses', [] ) == []:
2199 main.log.error(
2200 "Error with host ipAddresses on controller" +
2201 controllerStr + ": " + str( host ) )
2202 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002203 ports = []
2204 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002205 for i in main.activeNodes:
2206 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002207 name="ports-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002208 args=[ main.CLIs[i].ports, [ None ] ],
2209 kwargs= { 'sleep': 5, 'attempts': 5,
2210 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 threads.append( t )
2212 t.start()
2213
2214 for t in threads:
2215 t.join()
2216 ports.append( t.result )
2217 links = []
2218 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002219 for i in main.activeNodes:
2220 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002221 name="links-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002222 args=[ main.CLIs[i].links, [ None ] ],
2223 kwargs= { 'sleep': 5, 'attempts': 5,
2224 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002225 threads.append( t )
2226 t.start()
2227
2228 for t in threads:
2229 t.join()
2230 links.append( t.result )
2231 clusters = []
2232 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002233 for i in main.activeNodes:
2234 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002235 name="clusters-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002236 args=[ main.CLIs[i].clusters, [ None ] ],
2237 kwargs= { 'sleep': 5, 'attempts': 5,
2238 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002239 threads.append( t )
2240 t.start()
2241
2242 for t in threads:
2243 t.join()
2244 clusters.append( t.result )
2245
2246 elapsed = time.time() - startTime
2247 cliTime = time.time() - cliStart
2248 print "Elapsed time: " + str( elapsed )
2249 print "CLI time: " + str( cliTime )
2250
Jon Halla440e872016-03-31 15:15:50 -07002251 if all( e is None for e in devices ) and\
2252 all( e is None for e in hosts ) and\
2253 all( e is None for e in ports ) and\
2254 all( e is None for e in links ) and\
2255 all( e is None for e in clusters ):
2256 topoFailMsg = "Could not get topology from ONOS"
2257 main.log.error( topoFailMsg )
2258 continue # Try again, No use trying to compare
2259
Jon Hall5cf14d52015-07-16 12:15:19 -07002260 mnSwitches = main.Mininet1.getSwitches()
2261 mnLinks = main.Mininet1.getLinks()
2262 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07002263 for controller in range( len( main.activeNodes ) ):
2264 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002265 if devices[ controller ] and ports[ controller ] and\
2266 "Error" not in devices[ controller ] and\
2267 "Error" not in ports[ controller ]:
2268
Jon Hallc6793552016-01-19 14:18:37 -08002269 try:
2270 currentDevicesResult = main.Mininet1.compareSwitches(
2271 mnSwitches,
2272 json.loads( devices[ controller ] ),
2273 json.loads( ports[ controller ] ) )
2274 except ( TypeError, ValueError ) as e:
2275 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2276 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002277 else:
2278 currentDevicesResult = main.FALSE
2279 utilities.assert_equals( expect=main.TRUE,
2280 actual=currentDevicesResult,
2281 onpass="ONOS" + controllerStr +
2282 " Switches view is correct",
2283 onfail="ONOS" + controllerStr +
2284 " Switches view is incorrect" )
2285
2286 if links[ controller ] and "Error" not in links[ controller ]:
2287 currentLinksResult = main.Mininet1.compareLinks(
2288 mnSwitches, mnLinks,
2289 json.loads( links[ controller ] ) )
2290 else:
2291 currentLinksResult = main.FALSE
2292 utilities.assert_equals( expect=main.TRUE,
2293 actual=currentLinksResult,
2294 onpass="ONOS" + controllerStr +
2295 " links view is correct",
2296 onfail="ONOS" + controllerStr +
2297 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002298 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002299 currentHostsResult = main.Mininet1.compareHosts(
2300 mnHosts,
2301 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002302 elif hosts[ controller ] == []:
2303 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002304 else:
2305 currentHostsResult = main.FALSE
2306 utilities.assert_equals( expect=main.TRUE,
2307 actual=currentHostsResult,
2308 onpass="ONOS" + controllerStr +
2309 " hosts exist in Mininet",
2310 onfail="ONOS" + controllerStr +
2311 " hosts don't match Mininet" )
2312 # CHECKING HOST ATTACHMENT POINTS
2313 hostAttachment = True
2314 zeroHosts = False
2315 # FIXME: topo-HA/obelisk specific mappings:
2316 # key is mac and value is dpid
2317 mappings = {}
2318 for i in range( 1, 29 ): # hosts 1 through 28
2319 # set up correct variables:
2320 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2321 if i == 1:
2322 deviceId = "1000".zfill(16)
2323 elif i == 2:
2324 deviceId = "2000".zfill(16)
2325 elif i == 3:
2326 deviceId = "3000".zfill(16)
2327 elif i == 4:
2328 deviceId = "3004".zfill(16)
2329 elif i == 5:
2330 deviceId = "5000".zfill(16)
2331 elif i == 6:
2332 deviceId = "6000".zfill(16)
2333 elif i == 7:
2334 deviceId = "6007".zfill(16)
2335 elif i >= 8 and i <= 17:
2336 dpid = '3' + str( i ).zfill( 3 )
2337 deviceId = dpid.zfill(16)
2338 elif i >= 18 and i <= 27:
2339 dpid = '6' + str( i ).zfill( 3 )
2340 deviceId = dpid.zfill(16)
2341 elif i == 28:
2342 deviceId = "2800".zfill(16)
2343 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002344 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002345 if hosts[ controller ] == []:
2346 main.log.warn( "There are no hosts discovered" )
2347 zeroHosts = True
2348 else:
2349 for host in hosts[ controller ]:
2350 mac = None
2351 location = None
2352 device = None
2353 port = None
2354 try:
2355 mac = host.get( 'mac' )
2356 assert mac, "mac field could not be found for this host object"
2357
2358 location = host.get( 'location' )
2359 assert location, "location field could not be found for this host object"
2360
2361 # Trim the protocol identifier off deviceId
2362 device = str( location.get( 'elementId' ) ).split(':')[1]
2363 assert device, "elementId field could not be found for this host location object"
2364
2365 port = location.get( 'port' )
2366 assert port, "port field could not be found for this host location object"
2367
2368 # Now check if this matches where they should be
2369 if mac and device and port:
2370 if str( port ) != "1":
2371 main.log.error( "The attachment port is incorrect for " +
2372 "host " + str( mac ) +
2373 ". Expected: 1 Actual: " + str( port) )
2374 hostAttachment = False
2375 if device != mappings[ str( mac ) ]:
2376 main.log.error( "The attachment device is incorrect for " +
2377 "host " + str( mac ) +
2378 ". Expected: " + mappings[ str( mac ) ] +
2379 " Actual: " + device )
2380 hostAttachment = False
2381 else:
2382 hostAttachment = False
2383 except AssertionError:
2384 main.log.exception( "Json object not as expected" )
2385 main.log.error( repr( host ) )
2386 hostAttachment = False
2387 else:
2388 main.log.error( "No hosts json output or \"Error\"" +
2389 " in output. hosts = " +
2390 repr( hosts[ controller ] ) )
2391 if zeroHosts is False:
2392 hostAttachment = True
2393
2394 # END CHECKING HOST ATTACHMENT POINTS
2395 devicesResults = devicesResults and currentDevicesResult
2396 linksResults = linksResults and currentLinksResult
2397 hostsResults = hostsResults and currentHostsResult
2398 hostAttachmentResults = hostAttachmentResults and\
2399 hostAttachment
2400 topoResult = ( devicesResults and linksResults
2401 and hostsResults and ipResult and
2402 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002403 utilities.assert_equals( expect=True,
2404 actual=topoResult,
2405 onpass="ONOS topology matches Mininet",
Jon Halla440e872016-03-31 15:15:50 -07002406 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002407 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002408
2409 # Compare json objects for hosts and dataplane clusters
2410
2411 # hosts
2412 main.step( "Hosts view is consistent across all ONOS nodes" )
2413 consistentHostsResult = main.TRUE
2414 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002415 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002416 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002417 if hosts[ controller ] == hosts[ 0 ]:
2418 continue
2419 else: # hosts not consistent
2420 main.log.error( "hosts from ONOS" + controllerStr +
2421 " is inconsistent with ONOS1" )
2422 main.log.warn( repr( hosts[ controller ] ) )
2423 consistentHostsResult = main.FALSE
2424
2425 else:
2426 main.log.error( "Error in getting ONOS hosts from ONOS" +
2427 controllerStr )
2428 consistentHostsResult = main.FALSE
2429 main.log.warn( "ONOS" + controllerStr +
2430 " hosts response: " +
2431 repr( hosts[ controller ] ) )
2432 utilities.assert_equals(
2433 expect=main.TRUE,
2434 actual=consistentHostsResult,
2435 onpass="Hosts view is consistent across all ONOS nodes",
2436 onfail="ONOS nodes have different views of hosts" )
2437
2438 main.step( "Hosts information is correct" )
2439 hostsResults = hostsResults and ipResult
2440 utilities.assert_equals(
2441 expect=main.TRUE,
2442 actual=hostsResults,
2443 onpass="Host information is correct",
2444 onfail="Host information is incorrect" )
2445
2446 main.step( "Host attachment points to the network" )
2447 utilities.assert_equals(
2448 expect=True,
2449 actual=hostAttachmentResults,
2450 onpass="Hosts are correctly attached to the network",
2451 onfail="ONOS did not correctly attach hosts to the network" )
2452
2453 # Strongly connected clusters of devices
2454 main.step( "Clusters view is consistent across all ONOS nodes" )
2455 consistentClustersResult = main.TRUE
2456 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07002457 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002458 if "Error" not in clusters[ controller ]:
2459 if clusters[ controller ] == clusters[ 0 ]:
2460 continue
2461 else: # clusters not consistent
2462 main.log.error( "clusters from ONOS" +
2463 controllerStr +
2464 " is inconsistent with ONOS1" )
2465 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002466 else:
2467 main.log.error( "Error in getting dataplane clusters " +
2468 "from ONOS" + controllerStr )
2469 consistentClustersResult = main.FALSE
2470 main.log.warn( "ONOS" + controllerStr +
2471 " clusters response: " +
2472 repr( clusters[ controller ] ) )
2473 utilities.assert_equals(
2474 expect=main.TRUE,
2475 actual=consistentClustersResult,
2476 onpass="Clusters view is consistent across all ONOS nodes",
2477 onfail="ONOS nodes have different views of clusters" )
2478
2479 main.step( "There is only one SCC" )
2480 # there should always only be one cluster
2481 try:
2482 numClusters = len( json.loads( clusters[ 0 ] ) )
2483 except ( ValueError, TypeError ):
2484 main.log.exception( "Error parsing clusters[0]: " +
2485 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002486 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002487 clusterResults = main.FALSE
2488 if numClusters == 1:
2489 clusterResults = main.TRUE
2490 utilities.assert_equals(
2491 expect=1,
2492 actual=numClusters,
2493 onpass="ONOS shows 1 SCC",
2494 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2495
2496 topoResult = ( devicesResults and linksResults
2497 and hostsResults and consistentHostsResult
2498 and consistentClustersResult and clusterResults
2499 and ipResult and hostAttachmentResults )
2500
2501 topoResult = topoResult and int( count <= 2 )
2502 note = "note it takes about " + str( int( cliTime ) ) + \
2503 " seconds for the test to make all the cli calls to fetch " +\
2504 "the topology from each ONOS instance"
2505 main.log.info(
2506 "Very crass estimate for topology discovery/convergence( " +
2507 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2508 str( count ) + " tries" )
2509
2510 main.step( "Device information is correct" )
2511 utilities.assert_equals(
2512 expect=main.TRUE,
2513 actual=devicesResults,
2514 onpass="Device information is correct",
2515 onfail="Device information is incorrect" )
2516
2517 main.step( "Links are correct" )
2518 utilities.assert_equals(
2519 expect=main.TRUE,
2520 actual=linksResults,
2521 onpass="Link are correct",
2522 onfail="Links are incorrect" )
2523
2524 main.step( "Hosts are correct" )
2525 utilities.assert_equals(
2526 expect=main.TRUE,
2527 actual=hostsResults,
2528 onpass="Hosts are correct",
2529 onfail="Hosts are incorrect" )
2530
2531 # FIXME: move this to an ONOS state case
2532 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002533 nodeResults = utilities.retry( main.HA.nodesCheck,
2534 False,
2535 args=[main.activeNodes],
2536 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002537
Jon Hall41d39f12016-04-11 22:54:35 -07002538 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002539 onpass="Nodes check successful",
2540 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002541 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002542 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002543 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002544 main.CLIs[i].name,
2545 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002546
2547 def CASE9( self, main ):
2548 """
2549 Link s3-s28 down
2550 """
2551 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002552 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002553 assert main, "main not defined"
2554 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002555 assert main.CLIs, "main.CLIs not defined"
2556 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002557 # NOTE: You should probably run a topology check after this
2558
2559 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2560
2561 description = "Turn off a link to ensure that Link Discovery " +\
2562 "is working properly"
2563 main.case( description )
2564
2565 main.step( "Kill Link between s3 and s28" )
2566 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2567 main.log.info( "Waiting " + str( linkSleep ) +
2568 " seconds for link down to be discovered" )
2569 time.sleep( linkSleep )
2570 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2571 onpass="Link down successful",
2572 onfail="Failed to bring link down" )
2573 # TODO do some sort of check here
2574
2575 def CASE10( self, main ):
2576 """
2577 Link s3-s28 up
2578 """
2579 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002580 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002581 assert main, "main not defined"
2582 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002583 assert main.CLIs, "main.CLIs not defined"
2584 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002585 # NOTE: You should probably run a topology check after this
2586
2587 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2588
2589 description = "Restore a link to ensure that Link Discovery is " + \
2590 "working properly"
2591 main.case( description )
2592
2593 main.step( "Bring link between s3 and s28 back up" )
2594 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2595 main.log.info( "Waiting " + str( linkSleep ) +
2596 " seconds for link up to be discovered" )
2597 time.sleep( linkSleep )
2598 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2599 onpass="Link up successful",
2600 onfail="Failed to bring link up" )
2601 # TODO do some sort of check here
2602
2603 def CASE11( self, main ):
2604 """
2605 Switch Down
2606 """
2607 # NOTE: You should probably run a topology check after this
2608 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002609 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002610 assert main, "main not defined"
2611 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002612 assert main.CLIs, "main.CLIs not defined"
2613 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002614
2615 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2616
2617 description = "Killing a switch to ensure it is discovered correctly"
Jon Halla440e872016-03-31 15:15:50 -07002618 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002619 main.case( description )
2620 switch = main.params[ 'kill' ][ 'switch' ]
2621 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2622
2623 # TODO: Make this switch parameterizable
2624 main.step( "Kill " + switch )
2625 main.log.info( "Deleting " + switch )
2626 main.Mininet1.delSwitch( switch )
2627 main.log.info( "Waiting " + str( switchSleep ) +
2628 " seconds for switch down to be discovered" )
2629 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002630 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002631 # Peek at the deleted switch
2632 main.log.warn( str( device ) )
2633 result = main.FALSE
2634 if device and device[ 'available' ] is False:
2635 result = main.TRUE
2636 utilities.assert_equals( expect=main.TRUE, actual=result,
2637 onpass="Kill switch successful",
2638 onfail="Failed to kill switch?" )
2639
2640 def CASE12( self, main ):
2641 """
2642 Switch Up
2643 """
2644 # NOTE: You should probably run a topology check after this
2645 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002646 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002647 assert main, "main not defined"
2648 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002649 assert main.CLIs, "main.CLIs not defined"
2650 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002651 assert ONOS1Port, "ONOS1Port not defined"
2652 assert ONOS2Port, "ONOS2Port not defined"
2653 assert ONOS3Port, "ONOS3Port not defined"
2654 assert ONOS4Port, "ONOS4Port not defined"
2655 assert ONOS5Port, "ONOS5Port not defined"
2656 assert ONOS6Port, "ONOS6Port not defined"
2657 assert ONOS7Port, "ONOS7Port not defined"
2658
2659 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2660 switch = main.params[ 'kill' ][ 'switch' ]
2661 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2662 links = main.params[ 'kill' ][ 'links' ].split()
Jon Halla440e872016-03-31 15:15:50 -07002663 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002664 description = "Adding a switch to ensure it is discovered correctly"
2665 main.case( description )
2666
2667 main.step( "Add back " + switch )
2668 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2669 for peer in links:
2670 main.Mininet1.addLink( switch, peer )
Jon Halla440e872016-03-31 15:15:50 -07002671 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002672 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2673 main.log.info( "Waiting " + str( switchSleep ) +
2674 " seconds for switch up to be discovered" )
2675 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002676 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002677 # Peek at the deleted switch
2678 main.log.warn( str( device ) )
2679 result = main.FALSE
2680 if device and device[ 'available' ]:
2681 result = main.TRUE
2682 utilities.assert_equals( expect=main.TRUE, actual=result,
2683 onpass="add switch successful",
2684 onfail="Failed to add switch?" )
2685
2686 def CASE13( self, main ):
2687 """
2688 Clean up
2689 """
2690 import os
2691 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002692 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002693 assert main, "main not defined"
2694 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002695 assert main.CLIs, "main.CLIs not defined"
2696 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002697
2698 # printing colors to terminal
2699 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2700 'blue': '\033[94m', 'green': '\033[92m',
2701 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2702 main.case( "Test Cleanup" )
2703 main.step( "Killing tcpdumps" )
2704 main.Mininet2.stopTcpdump()
2705
2706 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002707 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002708 main.step( "Copying MN pcap and ONOS log files to test station" )
2709 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2710 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002711 # NOTE: MN Pcap file is being saved to logdir.
2712 # We scp this file as MN and TestON aren't necessarily the same vm
2713
2714 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002715 # TODO: Load these from params
2716 # NOTE: must end in /
2717 logFolder = "/opt/onos/log/"
2718 logFiles = [ "karaf.log", "karaf.log.1" ]
2719 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002720 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002721 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002722 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002723 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2724 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002725 # std*.log's
2726 # NOTE: must end in /
2727 logFolder = "/opt/onos/var/"
2728 logFiles = [ "stderr.log", "stdout.log" ]
2729 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002730 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002731 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002732 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002733 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2734 logFolder + f, dstName )
2735 else:
2736 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002737
2738 main.step( "Stopping Mininet" )
2739 mnResult = main.Mininet1.stopNet()
2740 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2741 onpass="Mininet stopped",
2742 onfail="MN cleanup NOT successful" )
2743
2744 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002745 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002746 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2747 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002748
2749 try:
2750 timerLog = open( main.logdir + "/Timers.csv", 'w')
2751 # Overwrite with empty line and close
2752 labels = "Gossip Intents"
2753 data = str( gossipTime )
2754 timerLog.write( labels + "\n" + data )
2755 timerLog.close()
2756 except NameError, e:
2757 main.log.exception(e)
2758
2759 def CASE14( self, main ):
2760 """
2761 start election app on all onos nodes
2762 """
Jon Halle1a3b752015-07-22 13:02:46 -07002763 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002764 assert main, "main not defined"
2765 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002766 assert main.CLIs, "main.CLIs not defined"
2767 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002768
2769 main.case("Start Leadership Election app")
2770 main.step( "Install leadership election app" )
Jon Halla440e872016-03-31 15:15:50 -07002771 onosCli = main.CLIs[ main.activeNodes[0] ]
2772 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002773 utilities.assert_equals(
2774 expect=main.TRUE,
2775 actual=appResult,
2776 onpass="Election app installed",
2777 onfail="Something went wrong with installing Leadership election" )
2778
2779 main.step( "Run for election on each node" )
Jon Halla440e872016-03-31 15:15:50 -07002780 for i in main.activeNodes:
2781 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002782 time.sleep(5)
2783 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2784 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002785 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002786 expect=True,
2787 actual=sameResult,
2788 onpass="All nodes see the same leaderboards",
2789 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002790
Jon Hall25463a82016-04-13 14:03:52 -07002791 if sameResult:
2792 leader = leaders[ 0 ][ 0 ]
2793 if main.nodes[main.activeNodes[0]].ip_address in leader:
2794 correctLeader = True
2795 else:
2796 correctLeader = False
2797 main.step( "First node was elected leader" )
2798 utilities.assert_equals(
2799 expect=True,
2800 actual=correctLeader,
2801 onpass="Correct leader was elected",
2802 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002803
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        "old" and "new" variable prefixes refer to data from before vs
        after the withdrawal, and later from before the withdrawal vs
        after the re-election.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election app there is nothing left to verify
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; the first entry is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means leader was not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node sees no leader at all; that is only OK
            # in the single-controller case ( expectNoLeader )
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # fewer than 3 entries on the board: not enough data to decide
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                            str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
2977
2978 def CASE16( self, main ):
2979 """
2980 Install Distributed Primitives app
2981 """
2982 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002983 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002984 assert main, "main not defined"
2985 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002986 assert main.CLIs, "main.CLIs not defined"
2987 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002988
2989 # Variables for the distributed primitives tests
2990 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07002991 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07002992 global onosSet
2993 global onosSetName
2994 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07002995 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07002996 onosSet = set([])
2997 onosSetName = "TestON-set"
2998
2999 description = "Install Primitives app"
3000 main.case( description )
3001 main.step( "Install Primitives app" )
3002 appName = "org.onosproject.distributedprimitives"
Jon Halla440e872016-03-31 15:15:50 -07003003 node = main.activeNodes[0]
3004 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003005 utilities.assert_equals( expect=main.TRUE,
3006 actual=appResults,
3007 onpass="Primitives app activated",
3008 onfail="Primitives app not activated" )
3009 time.sleep( 5 ) # To allow all nodes to activate
3010
3011 def CASE17( self, main ):
3012 """
3013 Check for basic functionality with distributed primitives
3014 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003015 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003016 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003017 assert main, "main not defined"
3018 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003019 assert main.CLIs, "main.CLIs not defined"
3020 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003021 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003022 assert onosSetName, "onosSetName not defined"
3023 # NOTE: assert fails if value is 0/None/Empty/False
3024 try:
3025 pCounterValue
3026 except NameError:
3027 main.log.error( "pCounterValue not defined, setting to 0" )
3028 pCounterValue = 0
3029 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003030 onosSet
3031 except NameError:
3032 main.log.error( "onosSet not defined, setting to empty Set" )
3033 onosSet = set([])
3034 # Variables for the distributed primitives tests. These are local only
3035 addValue = "a"
3036 addAllValue = "a b c d e f"
3037 retainValue = "c d e f"
3038
3039 description = "Check for basic functionality with distributed " +\
3040 "primitives"
3041 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003042 main.caseExplanation = "Test the methods of the distributed " +\
3043 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003044 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003045 # Partitioned counters
3046 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003047 pCounters = []
3048 threads = []
3049 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003050 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003051 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3052 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003053 args=[ pCounterName ] )
3054 pCounterValue += 1
3055 addedPValues.append( pCounterValue )
3056 threads.append( t )
3057 t.start()
3058
3059 for t in threads:
3060 t.join()
3061 pCounters.append( t.result )
3062 # Check that counter incremented numController times
3063 pCounterResults = True
3064 for i in addedPValues:
3065 tmpResult = i in pCounters
3066 pCounterResults = pCounterResults and tmpResult
3067 if not tmpResult:
3068 main.log.error( str( i ) + " is not in partitioned "
3069 "counter incremented results" )
3070 utilities.assert_equals( expect=True,
3071 actual=pCounterResults,
3072 onpass="Default counter incremented",
3073 onfail="Error incrementing default" +
3074 " counter" )
3075
Jon Halle1a3b752015-07-22 13:02:46 -07003076 main.step( "Get then Increment a default counter on each node" )
3077 pCounters = []
3078 threads = []
3079 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003080 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003081 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3082 name="counterGetAndAdd-" + str( i ),
3083 args=[ pCounterName ] )
3084 addedPValues.append( pCounterValue )
3085 pCounterValue += 1
3086 threads.append( t )
3087 t.start()
3088
3089 for t in threads:
3090 t.join()
3091 pCounters.append( t.result )
3092 # Check that counter incremented numController times
3093 pCounterResults = True
3094 for i in addedPValues:
3095 tmpResult = i in pCounters
3096 pCounterResults = pCounterResults and tmpResult
3097 if not tmpResult:
3098 main.log.error( str( i ) + " is not in partitioned "
3099 "counter incremented results" )
3100 utilities.assert_equals( expect=True,
3101 actual=pCounterResults,
3102 onpass="Default counter incremented",
3103 onfail="Error incrementing default" +
3104 " counter" )
3105
3106 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003107 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003108 utilities.assert_equals( expect=main.TRUE,
3109 actual=incrementCheck,
3110 onpass="Added counters are correct",
3111 onfail="Added counters are incorrect" )
3112
3113 main.step( "Add -8 to then get a default counter on each node" )
3114 pCounters = []
3115 threads = []
3116 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003117 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003118 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3119 name="counterIncrement-" + str( i ),
3120 args=[ pCounterName ],
3121 kwargs={ "delta": -8 } )
3122 pCounterValue += -8
3123 addedPValues.append( pCounterValue )
3124 threads.append( t )
3125 t.start()
3126
3127 for t in threads:
3128 t.join()
3129 pCounters.append( t.result )
3130 # Check that counter incremented numController times
3131 pCounterResults = True
3132 for i in addedPValues:
3133 tmpResult = i in pCounters
3134 pCounterResults = pCounterResults and tmpResult
3135 if not tmpResult:
3136 main.log.error( str( i ) + " is not in partitioned "
3137 "counter incremented results" )
3138 utilities.assert_equals( expect=True,
3139 actual=pCounterResults,
3140 onpass="Default counter incremented",
3141 onfail="Error incrementing default" +
3142 " counter" )
3143
3144 main.step( "Add 5 to then get a default counter on each node" )
3145 pCounters = []
3146 threads = []
3147 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003148 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003149 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3150 name="counterIncrement-" + str( i ),
3151 args=[ pCounterName ],
3152 kwargs={ "delta": 5 } )
3153 pCounterValue += 5
3154 addedPValues.append( pCounterValue )
3155 threads.append( t )
3156 t.start()
3157
3158 for t in threads:
3159 t.join()
3160 pCounters.append( t.result )
3161 # Check that counter incremented numController times
3162 pCounterResults = True
3163 for i in addedPValues:
3164 tmpResult = i in pCounters
3165 pCounterResults = pCounterResults and tmpResult
3166 if not tmpResult:
3167 main.log.error( str( i ) + " is not in partitioned "
3168 "counter incremented results" )
3169 utilities.assert_equals( expect=True,
3170 actual=pCounterResults,
3171 onpass="Default counter incremented",
3172 onfail="Error incrementing default" +
3173 " counter" )
3174
3175 main.step( "Get then add 5 to a default counter on each node" )
3176 pCounters = []
3177 threads = []
3178 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003179 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003180 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3181 name="counterIncrement-" + str( i ),
3182 args=[ pCounterName ],
3183 kwargs={ "delta": 5 } )
3184 addedPValues.append( pCounterValue )
3185 pCounterValue += 5
3186 threads.append( t )
3187 t.start()
3188
3189 for t in threads:
3190 t.join()
3191 pCounters.append( t.result )
3192 # Check that counter incremented numController times
3193 pCounterResults = True
3194 for i in addedPValues:
3195 tmpResult = i in pCounters
3196 pCounterResults = pCounterResults and tmpResult
3197 if not tmpResult:
3198 main.log.error( str( i ) + " is not in partitioned "
3199 "counter incremented results" )
3200 utilities.assert_equals( expect=True,
3201 actual=pCounterResults,
3202 onpass="Default counter incremented",
3203 onfail="Error incrementing default" +
3204 " counter" )
3205
3206 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003207 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003208 utilities.assert_equals( expect=main.TRUE,
3209 actual=incrementCheck,
3210 onpass="Added counters are correct",
3211 onfail="Added counters are incorrect" )
3212
Jon Hall5cf14d52015-07-16 12:15:19 -07003213 # DISTRIBUTED SETS
3214 main.step( "Distributed Set get" )
3215 size = len( onosSet )
3216 getResponses = []
3217 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003218 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003219 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003220 name="setTestGet-" + str( i ),
3221 args=[ onosSetName ] )
3222 threads.append( t )
3223 t.start()
3224 for t in threads:
3225 t.join()
3226 getResponses.append( t.result )
3227
3228 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003229 for i in range( len( main.activeNodes ) ):
3230 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003231 if isinstance( getResponses[ i ], list):
3232 current = set( getResponses[ i ] )
3233 if len( current ) == len( getResponses[ i ] ):
3234 # no repeats
3235 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003236 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003237 " has incorrect view" +
3238 " of set " + onosSetName + ":\n" +
3239 str( getResponses[ i ] ) )
3240 main.log.debug( "Expected: " + str( onosSet ) )
3241 main.log.debug( "Actual: " + str( current ) )
3242 getResults = main.FALSE
3243 else:
3244 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003245 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003246 " has repeat elements in" +
3247 " set " + onosSetName + ":\n" +
3248 str( getResponses[ i ] ) )
3249 getResults = main.FALSE
3250 elif getResponses[ i ] == main.ERROR:
3251 getResults = main.FALSE
3252 utilities.assert_equals( expect=main.TRUE,
3253 actual=getResults,
3254 onpass="Set elements are correct",
3255 onfail="Set elements are incorrect" )
3256
3257 main.step( "Distributed Set size" )
3258 sizeResponses = []
3259 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003260 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003261 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003262 name="setTestSize-" + str( i ),
3263 args=[ onosSetName ] )
3264 threads.append( t )
3265 t.start()
3266 for t in threads:
3267 t.join()
3268 sizeResponses.append( t.result )
3269
3270 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003271 for i in range( len( main.activeNodes ) ):
3272 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003273 if size != sizeResponses[ i ]:
3274 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003275 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003276 " expected a size of " + str( size ) +
3277 " for set " + onosSetName +
3278 " but got " + str( sizeResponses[ i ] ) )
3279 utilities.assert_equals( expect=main.TRUE,
3280 actual=sizeResults,
3281 onpass="Set sizes are correct",
3282 onfail="Set sizes are incorrect" )
3283
3284 main.step( "Distributed Set add()" )
3285 onosSet.add( addValue )
3286 addResponses = []
3287 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003288 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003289 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003290 name="setTestAdd-" + str( i ),
3291 args=[ onosSetName, addValue ] )
3292 threads.append( t )
3293 t.start()
3294 for t in threads:
3295 t.join()
3296 addResponses.append( t.result )
3297
3298 # main.TRUE = successfully changed the set
3299 # main.FALSE = action resulted in no change in set
3300 # main.ERROR - Some error in executing the function
3301 addResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003302 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003303 if addResponses[ i ] == main.TRUE:
3304 # All is well
3305 pass
3306 elif addResponses[ i ] == main.FALSE:
3307 # Already in set, probably fine
3308 pass
3309 elif addResponses[ i ] == main.ERROR:
3310 # Error in execution
3311 addResults = main.FALSE
3312 else:
3313 # unexpected result
3314 addResults = main.FALSE
3315 if addResults != main.TRUE:
3316 main.log.error( "Error executing set add" )
3317
3318 # Check if set is still correct
3319 size = len( onosSet )
3320 getResponses = []
3321 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003322 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003323 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003324 name="setTestGet-" + str( i ),
3325 args=[ onosSetName ] )
3326 threads.append( t )
3327 t.start()
3328 for t in threads:
3329 t.join()
3330 getResponses.append( t.result )
3331 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003332 for i in range( len( main.activeNodes ) ):
3333 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003334 if isinstance( getResponses[ i ], list):
3335 current = set( getResponses[ i ] )
3336 if len( current ) == len( getResponses[ i ] ):
3337 # no repeats
3338 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003339 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003340 " of set " + onosSetName + ":\n" +
3341 str( getResponses[ i ] ) )
3342 main.log.debug( "Expected: " + str( onosSet ) )
3343 main.log.debug( "Actual: " + str( current ) )
3344 getResults = main.FALSE
3345 else:
3346 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003347 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003348 " set " + onosSetName + ":\n" +
3349 str( getResponses[ i ] ) )
3350 getResults = main.FALSE
3351 elif getResponses[ i ] == main.ERROR:
3352 getResults = main.FALSE
3353 sizeResponses = []
3354 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003355 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003356 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003357 name="setTestSize-" + str( i ),
3358 args=[ onosSetName ] )
3359 threads.append( t )
3360 t.start()
3361 for t in threads:
3362 t.join()
3363 sizeResponses.append( t.result )
3364 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003365 for i in range( len( main.activeNodes ) ):
3366 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003367 if size != sizeResponses[ i ]:
3368 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003369 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003370 " expected a size of " + str( size ) +
3371 " for set " + onosSetName +
3372 " but got " + str( sizeResponses[ i ] ) )
3373 addResults = addResults and getResults and sizeResults
3374 utilities.assert_equals( expect=main.TRUE,
3375 actual=addResults,
3376 onpass="Set add correct",
3377 onfail="Set add was incorrect" )
3378
3379 main.step( "Distributed Set addAll()" )
3380 onosSet.update( addAllValue.split() )
3381 addResponses = []
3382 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003383 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003384 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003385 name="setTestAddAll-" + str( i ),
3386 args=[ onosSetName, addAllValue ] )
3387 threads.append( t )
3388 t.start()
3389 for t in threads:
3390 t.join()
3391 addResponses.append( t.result )
3392
3393 # main.TRUE = successfully changed the set
3394 # main.FALSE = action resulted in no change in set
3395 # main.ERROR - Some error in executing the function
3396 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003397 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003398 if addResponses[ i ] == main.TRUE:
3399 # All is well
3400 pass
3401 elif addResponses[ i ] == main.FALSE:
3402 # Already in set, probably fine
3403 pass
3404 elif addResponses[ i ] == main.ERROR:
3405 # Error in execution
3406 addAllResults = main.FALSE
3407 else:
3408 # unexpected result
3409 addAllResults = main.FALSE
3410 if addAllResults != main.TRUE:
3411 main.log.error( "Error executing set addAll" )
3412
3413 # Check if set is still correct
3414 size = len( onosSet )
3415 getResponses = []
3416 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003417 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003418 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003419 name="setTestGet-" + str( i ),
3420 args=[ onosSetName ] )
3421 threads.append( t )
3422 t.start()
3423 for t in threads:
3424 t.join()
3425 getResponses.append( t.result )
3426 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003427 for i in range( len( main.activeNodes ) ):
3428 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003429 if isinstance( getResponses[ i ], list):
3430 current = set( getResponses[ i ] )
3431 if len( current ) == len( getResponses[ i ] ):
3432 # no repeats
3433 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003434 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003435 " has incorrect view" +
3436 " of set " + onosSetName + ":\n" +
3437 str( getResponses[ i ] ) )
3438 main.log.debug( "Expected: " + str( onosSet ) )
3439 main.log.debug( "Actual: " + str( current ) )
3440 getResults = main.FALSE
3441 else:
3442 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003443 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003444 " has repeat elements in" +
3445 " set " + onosSetName + ":\n" +
3446 str( getResponses[ i ] ) )
3447 getResults = main.FALSE
3448 elif getResponses[ i ] == main.ERROR:
3449 getResults = main.FALSE
3450 sizeResponses = []
3451 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003452 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003453 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003454 name="setTestSize-" + str( i ),
3455 args=[ onosSetName ] )
3456 threads.append( t )
3457 t.start()
3458 for t in threads:
3459 t.join()
3460 sizeResponses.append( t.result )
3461 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003462 for i in range( len( main.activeNodes ) ):
3463 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003464 if size != sizeResponses[ i ]:
3465 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003466 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003467 " expected a size of " + str( size ) +
3468 " for set " + onosSetName +
3469 " but got " + str( sizeResponses[ i ] ) )
3470 addAllResults = addAllResults and getResults and sizeResults
3471 utilities.assert_equals( expect=main.TRUE,
3472 actual=addAllResults,
3473 onpass="Set addAll correct",
3474 onfail="Set addAll was incorrect" )
3475
3476 main.step( "Distributed Set contains()" )
3477 containsResponses = []
3478 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003479 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003480 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003481 name="setContains-" + str( i ),
3482 args=[ onosSetName ],
3483 kwargs={ "values": addValue } )
3484 threads.append( t )
3485 t.start()
3486 for t in threads:
3487 t.join()
3488 # NOTE: This is the tuple
3489 containsResponses.append( t.result )
3490
3491 containsResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003492 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003493 if containsResponses[ i ] == main.ERROR:
3494 containsResults = main.FALSE
3495 else:
3496 containsResults = containsResults and\
3497 containsResponses[ i ][ 1 ]
3498 utilities.assert_equals( expect=main.TRUE,
3499 actual=containsResults,
3500 onpass="Set contains is functional",
3501 onfail="Set contains failed" )
3502
3503 main.step( "Distributed Set containsAll()" )
3504 containsAllResponses = []
3505 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003506 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003507 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003508 name="setContainsAll-" + str( i ),
3509 args=[ onosSetName ],
3510 kwargs={ "values": addAllValue } )
3511 threads.append( t )
3512 t.start()
3513 for t in threads:
3514 t.join()
3515 # NOTE: This is the tuple
3516 containsAllResponses.append( t.result )
3517
3518 containsAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003519 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003520 if containsResponses[ i ] == main.ERROR:
3521 containsResults = main.FALSE
3522 else:
3523 containsResults = containsResults and\
3524 containsResponses[ i ][ 1 ]
3525 utilities.assert_equals( expect=main.TRUE,
3526 actual=containsAllResults,
3527 onpass="Set containsAll is functional",
3528 onfail="Set containsAll failed" )
3529
3530 main.step( "Distributed Set remove()" )
3531 onosSet.remove( addValue )
3532 removeResponses = []
3533 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003534 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003535 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003536 name="setTestRemove-" + str( i ),
3537 args=[ onosSetName, addValue ] )
3538 threads.append( t )
3539 t.start()
3540 for t in threads:
3541 t.join()
3542 removeResponses.append( t.result )
3543
3544 # main.TRUE = successfully changed the set
3545 # main.FALSE = action resulted in no change in set
3546 # main.ERROR - Some error in executing the function
3547 removeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003548 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003549 if removeResponses[ i ] == main.TRUE:
3550 # All is well
3551 pass
3552 elif removeResponses[ i ] == main.FALSE:
3553 # not in set, probably fine
3554 pass
3555 elif removeResponses[ i ] == main.ERROR:
3556 # Error in execution
3557 removeResults = main.FALSE
3558 else:
3559 # unexpected result
3560 removeResults = main.FALSE
3561 if removeResults != main.TRUE:
3562 main.log.error( "Error executing set remove" )
3563
3564 # Check if set is still correct
3565 size = len( onosSet )
3566 getResponses = []
3567 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003568 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003569 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003570 name="setTestGet-" + str( i ),
3571 args=[ onosSetName ] )
3572 threads.append( t )
3573 t.start()
3574 for t in threads:
3575 t.join()
3576 getResponses.append( t.result )
3577 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003578 for i in range( len( main.activeNodes ) ):
3579 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003580 if isinstance( getResponses[ i ], list):
3581 current = set( getResponses[ i ] )
3582 if len( current ) == len( getResponses[ i ] ):
3583 # no repeats
3584 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003585 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003586 " has incorrect view" +
3587 " of set " + onosSetName + ":\n" +
3588 str( getResponses[ i ] ) )
3589 main.log.debug( "Expected: " + str( onosSet ) )
3590 main.log.debug( "Actual: " + str( current ) )
3591 getResults = main.FALSE
3592 else:
3593 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003594 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003595 " has repeat elements in" +
3596 " set " + onosSetName + ":\n" +
3597 str( getResponses[ i ] ) )
3598 getResults = main.FALSE
3599 elif getResponses[ i ] == main.ERROR:
3600 getResults = main.FALSE
3601 sizeResponses = []
3602 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003603 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003604 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003605 name="setTestSize-" + str( i ),
3606 args=[ onosSetName ] )
3607 threads.append( t )
3608 t.start()
3609 for t in threads:
3610 t.join()
3611 sizeResponses.append( t.result )
3612 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003613 for i in range( len( main.activeNodes ) ):
3614 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003615 if size != sizeResponses[ i ]:
3616 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003617 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003618 " expected a size of " + str( size ) +
3619 " for set " + onosSetName +
3620 " but got " + str( sizeResponses[ i ] ) )
3621 removeResults = removeResults and getResults and sizeResults
3622 utilities.assert_equals( expect=main.TRUE,
3623 actual=removeResults,
3624 onpass="Set remove correct",
3625 onfail="Set remove was incorrect" )
3626
3627 main.step( "Distributed Set removeAll()" )
3628 onosSet.difference_update( addAllValue.split() )
3629 removeAllResponses = []
3630 threads = []
3631 try:
Jon Halla440e872016-03-31 15:15:50 -07003632 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003633 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003634 name="setTestRemoveAll-" + str( i ),
3635 args=[ onosSetName, addAllValue ] )
3636 threads.append( t )
3637 t.start()
3638 for t in threads:
3639 t.join()
3640 removeAllResponses.append( t.result )
3641 except Exception, e:
3642 main.log.exception(e)
3643
3644 # main.TRUE = successfully changed the set
3645 # main.FALSE = action resulted in no change in set
3646 # main.ERROR - Some error in executing the function
3647 removeAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003648 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003649 if removeAllResponses[ i ] == main.TRUE:
3650 # All is well
3651 pass
3652 elif removeAllResponses[ i ] == main.FALSE:
3653 # not in set, probably fine
3654 pass
3655 elif removeAllResponses[ i ] == main.ERROR:
3656 # Error in execution
3657 removeAllResults = main.FALSE
3658 else:
3659 # unexpected result
3660 removeAllResults = main.FALSE
3661 if removeAllResults != main.TRUE:
3662 main.log.error( "Error executing set removeAll" )
3663
3664 # Check if set is still correct
3665 size = len( onosSet )
3666 getResponses = []
3667 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003668 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003669 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003670 name="setTestGet-" + str( i ),
3671 args=[ onosSetName ] )
3672 threads.append( t )
3673 t.start()
3674 for t in threads:
3675 t.join()
3676 getResponses.append( t.result )
3677 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003678 for i in range( len( main.activeNodes ) ):
3679 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003680 if isinstance( getResponses[ i ], list):
3681 current = set( getResponses[ i ] )
3682 if len( current ) == len( getResponses[ i ] ):
3683 # no repeats
3684 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003685 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003686 " has incorrect view" +
3687 " of set " + onosSetName + ":\n" +
3688 str( getResponses[ i ] ) )
3689 main.log.debug( "Expected: " + str( onosSet ) )
3690 main.log.debug( "Actual: " + str( current ) )
3691 getResults = main.FALSE
3692 else:
3693 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003694 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003695 " has repeat elements in" +
3696 " set " + onosSetName + ":\n" +
3697 str( getResponses[ i ] ) )
3698 getResults = main.FALSE
3699 elif getResponses[ i ] == main.ERROR:
3700 getResults = main.FALSE
3701 sizeResponses = []
3702 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003703 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003704 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003705 name="setTestSize-" + str( i ),
3706 args=[ onosSetName ] )
3707 threads.append( t )
3708 t.start()
3709 for t in threads:
3710 t.join()
3711 sizeResponses.append( t.result )
3712 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003713 for i in range( len( main.activeNodes ) ):
3714 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003715 if size != sizeResponses[ i ]:
3716 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003717 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003718 " expected a size of " + str( size ) +
3719 " for set " + onosSetName +
3720 " but got " + str( sizeResponses[ i ] ) )
3721 removeAllResults = removeAllResults and getResults and sizeResults
3722 utilities.assert_equals( expect=main.TRUE,
3723 actual=removeAllResults,
3724 onpass="Set removeAll correct",
3725 onfail="Set removeAll was incorrect" )
3726
3727 main.step( "Distributed Set addAll()" )
3728 onosSet.update( addAllValue.split() )
3729 addResponses = []
3730 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003731 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003732 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003733 name="setTestAddAll-" + str( i ),
3734 args=[ onosSetName, addAllValue ] )
3735 threads.append( t )
3736 t.start()
3737 for t in threads:
3738 t.join()
3739 addResponses.append( t.result )
3740
3741 # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        # Tally the per-node setTestAdd results gathered just above this span.
        # Any ERROR or unrecognized response fails the step; FALSE (element
        # already present) is tolerated.
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Query the distributed set on every active node in parallel and
        # compare each node's view against the locally tracked onosSet.
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log readability
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set: duplicates were returned
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node as well
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the add, the contents, and the size all check out
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3823
        main.step( "Distributed Set clear()" )
        # Mirror the operation on the local reference set, then clear the
        # distributed set on every active node in parallel.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "],  # value doesn't matter when clearing
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        # Each node's view of the (now empty) set must match onosSet.
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log readability
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set: duplicates were returned
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node as well
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the clear, the contents, and the size all check out
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
3921
        main.step( "Distributed Set addAll()" )
        # Re-populate the freshly cleared set: mirror locally, then push the
        # whole space-separated value list to every active node in parallel.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Each node's view of the set must match onosSet after the addAll.
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log readability
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set: duplicates were returned
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node as well
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the add, the contents, and the size all check out
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4018
        main.step( "Distributed Set retain()" )
        # retain() == set intersection: mirror locally with
        # intersection_update, then ask every active node to retain only the
        # given values (setTestRemove with retain=True).
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        # Each node's view of the set must match onosSet after the retain.
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log readability
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set: duplicates were returned
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node as well
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the retain, the contents, and the size all check out
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4115
Jon Hall2a5002c2015-08-21 16:49:11 -07004116 # Transactional maps
4117 main.step( "Partitioned Transactional maps put" )
4118 tMapValue = "Testing"
4119 numKeys = 100
4120 putResult = True
Jon Halla440e872016-03-31 15:15:50 -07004121 node = main.activeNodes[0]
4122 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4123 if putResponses and len( putResponses ) == 100:
Jon Hall2a5002c2015-08-21 16:49:11 -07004124 for i in putResponses:
4125 if putResponses[ i ][ 'value' ] != tMapValue:
4126 putResult = False
4127 else:
4128 putResult = False
4129 if not putResult:
4130 main.log.debug( "Put response values: " + str( putResponses ) )
4131 utilities.assert_equals( expect=True,
4132 actual=putResult,
4133 onpass="Partitioned Transactional Map put successful",
4134 onfail="Partitioned Transactional Map put values are incorrect" )
4135
4136 main.step( "Partitioned Transactional maps get" )
Jon Hall9bfadd22016-05-11 14:48:07 -07004137 # FIXME: is this sleep needed?
4138 time.sleep( 5 )
4139
Jon Hall2a5002c2015-08-21 16:49:11 -07004140 getCheck = True
4141 for n in range( 1, numKeys + 1 ):
4142 getResponses = []
4143 threads = []
4144 valueCheck = True
Jon Halla440e872016-03-31 15:15:50 -07004145 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004146 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4147 name="TMap-get-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07004148 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004149 threads.append( t )
4150 t.start()
4151 for t in threads:
4152 t.join()
4153 getResponses.append( t.result )
4154 for node in getResponses:
4155 if node != tMapValue:
4156 valueCheck = False
4157 if not valueCheck:
4158 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4159 main.log.warn( getResponses )
4160 getCheck = getCheck and valueCheck
4161 utilities.assert_equals( expect=True,
4162 actual=getCheck,
4163 onpass="Partitioned Transactional Map get values were correct",
4164 onfail="Partitioned Transactional Map values incorrect" )