"""
Description: This test is to determine if the HA test setup is
    working correctly. There are no failures so this test should
    have a 100% pass rate

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The Failure case. Since this is the Sanity test, we do nothing.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAsanity:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import time
        import json
        main.log.info( "ONOS HA Sanity test - initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Set up the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # TODO: refactor how to get onos port, maybe put into component tag?
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
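        # Grab the CLI and node components for each controller; stop at the
        # first one that is not defined so the cluster size can be smaller
        # than the configured maximum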
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short-term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAsanity"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        main.step( "Checking if ONOS is up yet" )
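        # Check each node with ONOSbench.isup (onos-wait-for-start); if any
        # node is not up yet, give the whole cluster one more pass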
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
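        # Open one CLI session per node in parallel threads and AND the
        # results together so a single failed session fails the step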
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
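        # utilities.retry re-runs the nodesCheck helper (up to 5 attempts)
        # until it stops returning False, giving the cluster time to stabilize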
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
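            # Intended mapping: s1 and s28 -> ONOS1, s2-s3 -> ONOS2,
            # s5-s6 -> ONOS3, s4 -> ONOS4, s8-s17 -> ONOS5, s7 -> ONOS6,
            # s18-s27 -> ONOS7; the modulo wraps these onto smaller clusters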
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts (via pingall for now)" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        if not pingResult:
            main.cleanup()
            main.exit()
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
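            # Host MACs follow the host number in hex, e.g. h8 is
            # 00:00:00:00:00:08 and its pair h18 is 00:00:00:00:00:12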
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
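        # Dump leadership, partition and pending-map state to help debug any
        # intents that did not make it into the store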
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
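        # Poll every active node (up to 100 times, 1 second apart) until they
        # all report the same set of intent IDs with every intent INSTALLED;
        # the elapsed time approximates eventual-consistency convergence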
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
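        # NOTE: the allowed convergence time scales with cluster size:
        #       one configured gossip period per active node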
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
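        # Poll intent states up to 40 times, one second apart, until every
        # intent reports INSTALLED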
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="Intent partitions are in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "the intents changes" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'
1223 mastershipState = '[]'
1224
1225 # Assert that each device has a master
1226 rolesNotNull = main.TRUE
1227 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001228 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001229 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001230 name="rolesNotNull-" + str( i ),
1231 args=[] )
1232 threads.append( t )
1233 t.start()
1234
1235 for t in threads:
1236 t.join()
1237 rolesNotNull = rolesNotNull and t.result
1238 utilities.assert_equals(
1239 expect=main.TRUE,
1240 actual=rolesNotNull,
1241 onpass="Each device has a master",
1242 onfail="Some devices don't have a master assigned" )
1243
1244 main.step( "Get the Mastership of each switch from each controller" )
1245 ONOSMastership = []
1246 mastershipCheck = main.FALSE
1247 consistentMastership = True
1248 rolesResults = True
1249 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001250 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001251 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001252 name="roles-" + str( i ),
1253 args=[] )
1254 threads.append( t )
1255 t.start()
1256
1257 for t in threads:
1258 t.join()
1259 ONOSMastership.append( t.result )
1260
Jon Halla440e872016-03-31 15:15:50 -07001261 for i in range( len( ONOSMastership ) ):
1262 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001263 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001264 main.log.error( "Error in getting ONOS" + node + " roles" )
1265 main.log.warn( "ONOS" + node + " mastership response: " +
1266 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001267 rolesResults = False
1268 utilities.assert_equals(
1269 expect=True,
1270 actual=rolesResults,
1271 onpass="No error in reading roles output",
1272 onfail="Error in reading roles from ONOS" )
1273
1274 main.step( "Check for consistency in roles from each controller" )
1275 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1276 main.log.info(
1277 "Switch roles are consistent across all ONOS nodes" )
1278 else:
1279 consistentMastership = False
1280 utilities.assert_equals(
1281 expect=True,
1282 actual=consistentMastership,
1283 onpass="Switch roles are consistent across all ONOS nodes",
1284 onfail="ONOS nodes have different views of switch roles" )
1285
1286 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001287 for i in range( len( main.activeNodes ) ):
1288 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 try:
1290 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001291 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001292 json.dumps(
1293 json.loads( ONOSMastership[ i ] ),
1294 sort_keys=True,
1295 indent=4,
1296 separators=( ',', ': ' ) ) )
1297 except ( ValueError, TypeError ):
1298 main.log.warn( repr( ONOSMastership[ i ] ) )
1299 elif rolesResults and consistentMastership:
1300 mastershipCheck = main.TRUE
1301 mastershipState = ONOSMastership[ 0 ]
1302
1303 main.step( "Get the intents from each controller" )
1304 global intentState
1305 intentState = []
1306 ONOSIntents = []
1307 intentCheck = main.FALSE
1308 consistentIntents = True
1309 intentsResults = True
1310 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001311 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001312 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001313 name="intents-" + str( i ),
1314 args=[],
1315 kwargs={ 'jsonFormat': True } )
1316 threads.append( t )
1317 t.start()
1318
1319 for t in threads:
1320 t.join()
1321 ONOSIntents.append( t.result )
1322
Jon Halla440e872016-03-31 15:15:50 -07001323 for i in range( len( ONOSIntents ) ):
1324 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001325 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001326 main.log.error( "Error in getting ONOS" + node + " intents" )
1327 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001328 repr( ONOSIntents[ i ] ) )
1329 intentsResults = False
1330 utilities.assert_equals(
1331 expect=True,
1332 actual=intentsResults,
1333 onpass="No error in reading intents output",
1334 onfail="Error in reading intents from ONOS" )
1335
1336 main.step( "Check for consistency in Intents from each controller" )
1337 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1338 main.log.info( "Intents are consistent across all ONOS " +
1339 "nodes" )
1340 else:
1341 consistentIntents = False
1342 main.log.error( "Intents not consistent" )
1343 utilities.assert_equals(
1344 expect=True,
1345 actual=consistentIntents,
1346 onpass="Intents are consistent across all ONOS nodes",
1347 onfail="ONOS nodes have different views of intents" )
1348
1349 if intentsResults:
1350 # Try to make it easy to figure out what is happening
1351 #
1352 # Intent ONOS1 ONOS2 ...
1353 # 0x01 INSTALLED INSTALLING
1354 # ... ... ...
1355 # ... ... ...
1356 title = " Id"
Jon Halla440e872016-03-31 15:15:50 -07001357 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001358 title += " " * 10 + "ONOS" + str( n + 1 )
1359 main.log.warn( title )
Jon Halle1a3b752015-07-22 13:02:46 -07001360 # get all intent keys in the cluster
Jon Hall5cf14d52015-07-16 12:15:19 -07001361 keys = []
1362 try:
1363 # Get the set of all intent keys
1364 for nodeStr in ONOSIntents:
1365 node = json.loads( nodeStr )
1366 for intent in node:
1367 keys.append( intent.get( 'id' ) )
1368 keys = set( keys )
1369 # For each intent key, print the state on each node
1370 for key in keys:
1371 row = "%-13s" % key
1372 for nodeStr in ONOSIntents:
1373 node = json.loads( nodeStr )
1374 for intent in node:
1375 if intent.get( 'id', "Error" ) == key:
1376 row += "%-15s" % intent.get( 'state' )
1377 main.log.warn( row )
1378 # End of intent state table
1379 except ValueError as e:
1380 main.log.exception( e )
1381 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1382
1383 if intentsResults and not consistentIntents:
1384 # print the json objects
Jon Halla440e872016-03-31 15:15:50 -07001385 n = str( main.activeNodes[-1] + 1 )
1386 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001387 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1388 sort_keys=True,
1389 indent=4,
1390 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001391 for i in range( len( ONOSIntents ) ):
1392 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001393 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07001394 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001395 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1396 sort_keys=True,
1397 indent=4,
1398 separators=( ',', ': ' ) ) )
1399 else:
Jon Halla440e872016-03-31 15:15:50 -07001400 main.log.debug( "ONOS" + node + " intents match ONOS" +
1401 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001402 elif intentsResults and consistentIntents:
1403 intentCheck = main.TRUE
1404 intentState = ONOSIntents[ 0 ]
1405
1406 main.step( "Get the flows from each controller" )
1407 global flowState
1408 flowState = []
1409 ONOSFlows = []
1410 ONOSFlowsJson = []
1411 flowCheck = main.FALSE
1412 consistentFlows = True
1413 flowsResults = True
1414 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001415 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001416 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001417 name="flows-" + str( i ),
1418 args=[],
1419 kwargs={ 'jsonFormat': True } )
1420 threads.append( t )
1421 t.start()
1422
1423 # NOTE: Flows command can take some time to run
1424 time.sleep(30)
1425 for t in threads:
1426 t.join()
1427 result = t.result
1428 ONOSFlows.append( result )
1429
Jon Halla440e872016-03-31 15:15:50 -07001430 for i in range( len( ONOSFlows ) ):
1431 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1433 main.log.error( "Error in getting ONOS" + num + " flows" )
1434 main.log.warn( "ONOS" + num + " flows response: " +
1435 repr( ONOSFlows[ i ] ) )
1436 flowsResults = False
1437 ONOSFlowsJson.append( None )
1438 else:
1439 try:
1440 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1441 except ( ValueError, TypeError ):
1442 # FIXME: change this to log.error?
1443 main.log.exception( "Error in parsing ONOS" + num +
1444 " response as json." )
1445 main.log.error( repr( ONOSFlows[ i ] ) )
1446 ONOSFlowsJson.append( None )
1447 flowsResults = False
1448 utilities.assert_equals(
1449 expect=True,
1450 actual=flowsResults,
1451 onpass="No error in reading flows output",
1452 onfail="Error in reading flows from ONOS" )
1453
1454 main.step( "Check for consistency in Flows from each controller" )
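        # NOTE: only the number of flows reported by each node is compared
        # here; individual flow entries can legitimately differ between nodes
        # (for example in counters), so a count check is used as a lightweight
        # consistency proxy.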
1455 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1456 if all( tmp ):
1457 main.log.info( "Flow count is consistent across all ONOS nodes" )
1458 else:
1459 consistentFlows = False
1460 utilities.assert_equals(
1461 expect=True,
1462 actual=consistentFlows,
1463 onpass="The flow count is consistent across all ONOS nodes",
1464 onfail="ONOS nodes have different flow counts" )
1465
1466 if flowsResults and not consistentFlows:
Jon Halla440e872016-03-31 15:15:50 -07001467 for i in range( len( ONOSFlows ) ):
1468 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001469 try:
1470 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001471 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001472 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1473 indent=4, separators=( ',', ': ' ) ) )
1474 except ( ValueError, TypeError ):
Jon Halla440e872016-03-31 15:15:50 -07001475 main.log.warn( "ONOS" + node + " flows: " +
1476 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001477 elif flowsResults and consistentFlows:
1478 flowCheck = main.TRUE
1479 flowState = ONOSFlows[ 0 ]
1480
1481 main.step( "Get the OF Table entries" )
1482 global flows
1483 flows = []
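        # Snapshot the OpenFlow 1.3 flow table of every switch (s1-s28).
        # CASE7 later compares these snapshots against the post-event tables
        # with flowTableComp() to verify that nothing changed.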
1484 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001485 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001486 if flowCheck == main.FALSE:
1487 for table in flows:
1488 main.log.warn( table )
1489 # TODO: Compare switch flow tables with ONOS flow tables
1490
1491 main.step( "Start continuous pings" )
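        # Start long-running (500 second) background pings between the host
        # pairs defined in the PING section of the params file; they are
        # stopped in CASE7 with pingLongKill().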
1492 main.Mininet2.pingLong(
1493 src=main.params[ 'PING' ][ 'source1' ],
1494 target=main.params[ 'PING' ][ 'target1' ],
1495 pingTime=500 )
1496 main.Mininet2.pingLong(
1497 src=main.params[ 'PING' ][ 'source2' ],
1498 target=main.params[ 'PING' ][ 'target2' ],
1499 pingTime=500 )
1500 main.Mininet2.pingLong(
1501 src=main.params[ 'PING' ][ 'source3' ],
1502 target=main.params[ 'PING' ][ 'target3' ],
1503 pingTime=500 )
1504 main.Mininet2.pingLong(
1505 src=main.params[ 'PING' ][ 'source4' ],
1506 target=main.params[ 'PING' ][ 'target4' ],
1507 pingTime=500 )
1508 main.Mininet2.pingLong(
1509 src=main.params[ 'PING' ][ 'source5' ],
1510 target=main.params[ 'PING' ][ 'target5' ],
1511 pingTime=500 )
1512 main.Mininet2.pingLong(
1513 src=main.params[ 'PING' ][ 'source6' ],
1514 target=main.params[ 'PING' ][ 'target6' ],
1515 pingTime=500 )
1516 main.Mininet2.pingLong(
1517 src=main.params[ 'PING' ][ 'source7' ],
1518 target=main.params[ 'PING' ][ 'target7' ],
1519 pingTime=500 )
1520 main.Mininet2.pingLong(
1521 src=main.params[ 'PING' ][ 'source8' ],
1522 target=main.params[ 'PING' ][ 'target8' ],
1523 pingTime=500 )
1524 main.Mininet2.pingLong(
1525 src=main.params[ 'PING' ][ 'source9' ],
1526 target=main.params[ 'PING' ][ 'target9' ],
1527 pingTime=500 )
1528 main.Mininet2.pingLong(
1529 src=main.params[ 'PING' ][ 'source10' ],
1530 target=main.params[ 'PING' ][ 'target10' ],
1531 pingTime=500 )
1532
1533 main.step( "Collecting topology information from ONOS" )
1534 devices = []
1535 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001536 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001537 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001538 name="devices-" + str( i ),
1539 args=[ ] )
1540 threads.append( t )
1541 t.start()
1542
1543 for t in threads:
1544 t.join()
1545 devices.append( t.result )
1546 hosts = []
1547 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001548 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001549 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001550 name="hosts-" + str( i ),
1551 args=[ ] )
1552 threads.append( t )
1553 t.start()
1554
1555 for t in threads:
1556 t.join()
1557 try:
1558 hosts.append( json.loads( t.result ) )
1559 except ( ValueError, TypeError ):
1560 # FIXME: better handling of this, print which node
1561 # Maybe use thread name?
1562 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001563 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001564 hosts.append( None )
1565
1566 ports = []
1567 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001568 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001569 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001570 name="ports-" + str( i ),
1571 args=[ ] )
1572 threads.append( t )
1573 t.start()
1574
1575 for t in threads:
1576 t.join()
1577 ports.append( t.result )
1578 links = []
1579 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001580 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001581 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001582 name="links-" + str( i ),
1583 args=[ ] )
1584 threads.append( t )
1585 t.start()
1586
1587 for t in threads:
1588 t.join()
1589 links.append( t.result )
1590 clusters = []
1591 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001592 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001593 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001594 name="clusters-" + str( i ),
1595 args=[ ] )
1596 threads.append( t )
1597 t.start()
1598
1599 for t in threads:
1600 t.join()
1601 clusters.append( t.result )
1602 # Compare json objects for hosts and dataplane clusters
1603
1604 # hosts
1605 main.step( "Host view is consistent across ONOS nodes" )
1606 consistentHostsResult = main.TRUE
1607 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001608 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001609 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001610 if hosts[ controller ] == hosts[ 0 ]:
1611 continue
1612 else: # hosts not consistent
1613 main.log.error( "hosts from ONOS" +
1614 controllerStr +
1615                                      " are inconsistent with ONOS1" )
1616 main.log.warn( repr( hosts[ controller ] ) )
1617 consistentHostsResult = main.FALSE
1618
1619 else:
1620 main.log.error( "Error in getting ONOS hosts from ONOS" +
1621 controllerStr )
1622 consistentHostsResult = main.FALSE
1623 main.log.warn( "ONOS" + controllerStr +
1624 " hosts response: " +
1625 repr( hosts[ controller ] ) )
1626 utilities.assert_equals(
1627 expect=main.TRUE,
1628 actual=consistentHostsResult,
1629 onpass="Hosts view is consistent across all ONOS nodes",
1630 onfail="ONOS nodes have different views of hosts" )
1631
1632 main.step( "Each host has an IP address" )
1633 ipResult = main.TRUE
1634 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001635 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001636 if hosts[ controller ]:
1637 for host in hosts[ controller ]:
1638 if not host.get( 'ipAddresses', [ ] ):
1639 main.log.error( "Error with host ips on controller" +
1640 controllerStr + ": " + str( host ) )
1641 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001642 utilities.assert_equals(
1643 expect=main.TRUE,
1644 actual=ipResult,
1645            onpass="The IPs of the hosts aren't empty",
1646            onfail="The IP of at least one host is missing" )
1647
1648 # Strongly connected clusters of devices
1649 main.step( "Cluster view is consistent across ONOS nodes" )
1650 consistentClustersResult = main.TRUE
1651 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07001652 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001653 if "Error" not in clusters[ controller ]:
1654 if clusters[ controller ] == clusters[ 0 ]:
1655 continue
1656 else: # clusters not consistent
1657 main.log.error( "clusters from ONOS" + controllerStr +
1658                                    " are inconsistent with ONOS1" )
1659 consistentClustersResult = main.FALSE
1660
1661 else:
1662 main.log.error( "Error in getting dataplane clusters " +
1663 "from ONOS" + controllerStr )
1664 consistentClustersResult = main.FALSE
1665 main.log.warn( "ONOS" + controllerStr +
1666 " clusters response: " +
1667 repr( clusters[ controller ] ) )
1668 utilities.assert_equals(
1669 expect=main.TRUE,
1670 actual=consistentClustersResult,
1671 onpass="Clusters view is consistent across all ONOS nodes",
1672 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001673 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001674 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001675
Jon Hall5cf14d52015-07-16 12:15:19 -07001676 # there should always only be one cluster
1677 main.step( "Cluster view correct across ONOS nodes" )
1678 try:
1679 numClusters = len( json.loads( clusters[ 0 ] ) )
1680 except ( ValueError, TypeError ):
1681 main.log.exception( "Error parsing clusters[0]: " +
1682 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001683 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001684 clusterResults = main.FALSE
1685 if numClusters == 1:
1686 clusterResults = main.TRUE
1687 utilities.assert_equals(
1688 expect=1,
1689 actual=numClusters,
1690 onpass="ONOS shows 1 SCC",
1691 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1692
1693 main.step( "Comparing ONOS topology to MN" )
1694 devicesResults = main.TRUE
1695 linksResults = main.TRUE
1696 hostsResults = main.TRUE
1697 mnSwitches = main.Mininet1.getSwitches()
1698 mnLinks = main.Mininet1.getLinks()
1699 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07001700        for controller in range( len( main.activeNodes ) ):
1701 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001702 if devices[ controller ] and ports[ controller ] and\
1703 "Error" not in devices[ controller ] and\
1704 "Error" not in ports[ controller ]:
Jon Halla440e872016-03-31 15:15:50 -07001705 currentDevicesResult = main.Mininet1.compareSwitches(
1706 mnSwitches,
1707 json.loads( devices[ controller ] ),
1708 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001709 else:
1710 currentDevicesResult = main.FALSE
1711 utilities.assert_equals( expect=main.TRUE,
1712 actual=currentDevicesResult,
1713 onpass="ONOS" + controllerStr +
1714 " Switches view is correct",
1715 onfail="ONOS" + controllerStr +
1716 " Switches view is incorrect" )
1717 if links[ controller ] and "Error" not in links[ controller ]:
1718 currentLinksResult = main.Mininet1.compareLinks(
1719 mnSwitches, mnLinks,
1720 json.loads( links[ controller ] ) )
1721 else:
1722 currentLinksResult = main.FALSE
1723 utilities.assert_equals( expect=main.TRUE,
1724 actual=currentLinksResult,
1725 onpass="ONOS" + controllerStr +
1726 " links view is correct",
1727 onfail="ONOS" + controllerStr +
1728 " links view is incorrect" )
1729
Jon Hall657cdf62015-12-17 14:40:51 -08001730 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001731 currentHostsResult = main.Mininet1.compareHosts(
1732 mnHosts,
1733 hosts[ controller ] )
1734 else:
1735 currentHostsResult = main.FALSE
1736 utilities.assert_equals( expect=main.TRUE,
1737 actual=currentHostsResult,
1738 onpass="ONOS" + controllerStr +
1739 " hosts exist in Mininet",
1740 onfail="ONOS" + controllerStr +
1741 " hosts don't match Mininet" )
1742
1743 devicesResults = devicesResults and currentDevicesResult
1744 linksResults = linksResults and currentLinksResult
1745 hostsResults = hostsResults and currentHostsResult
1746
1747 main.step( "Device information is correct" )
1748 utilities.assert_equals(
1749 expect=main.TRUE,
1750 actual=devicesResults,
1751 onpass="Device information is correct",
1752 onfail="Device information is incorrect" )
1753
1754 main.step( "Links are correct" )
1755 utilities.assert_equals(
1756 expect=main.TRUE,
1757 actual=linksResults,
1758            onpass="Links are correct",
1759 onfail="Links are incorrect" )
1760
1761 main.step( "Hosts are correct" )
1762 utilities.assert_equals(
1763 expect=main.TRUE,
1764 actual=hostsResults,
1765 onpass="Hosts are correct",
1766 onfail="Hosts are incorrect" )
1767
1768 def CASE6( self, main ):
1769 """
1770 The Failure case. Since this is the Sanity test, we do nothing.
1771 """
1772 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001773 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001774 assert main, "main not defined"
1775 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001776 assert main.CLIs, "main.CLIs not defined"
1777 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001778 main.case( "Wait 60 seconds instead of inducing a failure" )
1779 time.sleep( 60 )
1780 utilities.assert_equals(
1781 expect=main.TRUE,
1782 actual=main.TRUE,
1783 onpass="Sleeping 60 seconds",
1784 onfail="Something is terribly wrong with my math" )
1785
1786 def CASE7( self, main ):
1787 """
1788 Check state after ONOS failure
1789 """
1790 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001791 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001792 assert main, "main not defined"
1793 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001794 assert main.CLIs, "main.CLIs not defined"
1795 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001796 main.case( "Running ONOS Constant State Tests" )
1797
1798 main.step( "Check that each switch has a master" )
1799 # Assert that each device has a master
1800 rolesNotNull = main.TRUE
1801 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001802 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001803 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001804 name="rolesNotNull-" + str( i ),
1805 args=[ ] )
1806 threads.append( t )
1807 t.start()
1808
1809 for t in threads:
1810 t.join()
1811 rolesNotNull = rolesNotNull and t.result
1812 utilities.assert_equals(
1813 expect=main.TRUE,
1814 actual=rolesNotNull,
1815 onpass="Each device has a master",
1816 onfail="Some devices don't have a master assigned" )
1817
1818 main.step( "Read device roles from ONOS" )
1819 ONOSMastership = []
1820 mastershipCheck = main.FALSE
1821 consistentMastership = True
1822 rolesResults = True
1823 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001824 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001825 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001826 name="roles-" + str( i ),
1827 args=[] )
1828 threads.append( t )
1829 t.start()
1830
1831 for t in threads:
1832 t.join()
1833 ONOSMastership.append( t.result )
1834
Jon Halla440e872016-03-31 15:15:50 -07001835 for i in range( len( ONOSMastership ) ):
1836 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001837 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001838 main.log.error( "Error in getting ONOS" + node + " roles" )
1839 main.log.warn( "ONOS" + node + " mastership response: " +
1840 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001841 rolesResults = False
1842 utilities.assert_equals(
1843 expect=True,
1844 actual=rolesResults,
1845 onpass="No error in reading roles output",
1846 onfail="Error in reading roles from ONOS" )
1847
1848 main.step( "Check for consistency in roles from each controller" )
1849 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1850 main.log.info(
1851 "Switch roles are consistent across all ONOS nodes" )
1852 else:
1853 consistentMastership = False
1854 utilities.assert_equals(
1855 expect=True,
1856 actual=consistentMastership,
1857 onpass="Switch roles are consistent across all ONOS nodes",
1858 onfail="ONOS nodes have different views of switch roles" )
1859
1860 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001861 for i in range( len( ONOSMastership ) ):
1862 node = str( main.activeNodes[i] + 1 )
 1863                main.log.warn( "ONOS" + node + " roles: " +
1864 json.dumps( json.loads( ONOSMastership[ i ] ),
1865 sort_keys=True,
1866 indent=4,
1867 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001868
1869 description2 = "Compare switch roles from before failure"
1870 main.step( description2 )
1871 try:
1872 currentJson = json.loads( ONOSMastership[0] )
1873 oldJson = json.loads( mastershipState )
1874 except ( ValueError, TypeError ):
1875 main.log.exception( "Something is wrong with parsing " +
1876 "ONOSMastership[0] or mastershipState" )
1877 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1878 main.log.error( "mastershipState" + repr( mastershipState ) )
1879 main.cleanup()
1880 main.exit()
1881 mastershipCheck = main.TRUE
1882 for i in range( 1, 29 ):
1883 switchDPID = str(
1884 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1885 current = [ switch[ 'master' ] for switch in currentJson
1886 if switchDPID in switch[ 'id' ] ]
1887 old = [ switch[ 'master' ] for switch in oldJson
1888 if switchDPID in switch[ 'id' ] ]
1889 if current == old:
1890 mastershipCheck = mastershipCheck and main.TRUE
1891 else:
1892 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1893 mastershipCheck = main.FALSE
1894 utilities.assert_equals(
1895 expect=main.TRUE,
1896 actual=mastershipCheck,
1897 onpass="Mastership of Switches was not changed",
1898 onfail="Mastership of some switches changed" )
1899 mastershipCheck = mastershipCheck and consistentMastership
1900
1901 main.step( "Get the intents and compare across all nodes" )
1902 ONOSIntents = []
1903 intentCheck = main.FALSE
1904 consistentIntents = True
1905 intentsResults = True
1906 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001907 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001908 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001909 name="intents-" + str( i ),
1910 args=[],
1911 kwargs={ 'jsonFormat': True } )
1912 threads.append( t )
1913 t.start()
1914
1915 for t in threads:
1916 t.join()
1917 ONOSIntents.append( t.result )
1918
Jon Halla440e872016-03-31 15:15:50 -07001919 for i in range( len( ONOSIntents) ):
1920 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001921 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001922 main.log.error( "Error in getting ONOS" + node + " intents" )
1923 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001924 repr( ONOSIntents[ i ] ) )
1925 intentsResults = False
1926 utilities.assert_equals(
1927 expect=True,
1928 actual=intentsResults,
1929 onpass="No error in reading intents output",
1930 onfail="Error in reading intents from ONOS" )
1931
1932 main.step( "Check for consistency in Intents from each controller" )
1933 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1934 main.log.info( "Intents are consistent across all ONOS " +
1935 "nodes" )
1936 else:
1937 consistentIntents = False
1938
1939 # Try to make it easy to figure out what is happening
1940 #
1941 # Intent ONOS1 ONOS2 ...
1942 # 0x01 INSTALLED INSTALLING
1943 # ... ... ...
1944 # ... ... ...
1945 title = " ID"
Jon Halla440e872016-03-31 15:15:50 -07001946 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001947 title += " " * 10 + "ONOS" + str( n + 1 )
1948 main.log.warn( title )
1949 # get all intent keys in the cluster
1950 keys = []
1951 for nodeStr in ONOSIntents:
1952 node = json.loads( nodeStr )
1953 for intent in node:
1954 keys.append( intent.get( 'id' ) )
1955 keys = set( keys )
1956 for key in keys:
1957 row = "%-13s" % key
1958 for nodeStr in ONOSIntents:
1959 node = json.loads( nodeStr )
1960 for intent in node:
1961 if intent.get( 'id' ) == key:
1962 row += "%-15s" % intent.get( 'state' )
1963 main.log.warn( row )
1964 # End table view
1965
1966 utilities.assert_equals(
1967 expect=True,
1968 actual=consistentIntents,
1969 onpass="Intents are consistent across all ONOS nodes",
1970 onfail="ONOS nodes have different views of intents" )
1971 intentStates = []
1972 for node in ONOSIntents: # Iter through ONOS nodes
1973 nodeStates = []
1974 # Iter through intents of a node
1975 try:
1976 for intent in json.loads( node ):
1977 nodeStates.append( intent[ 'state' ] )
1978 except ( ValueError, TypeError ):
1979 main.log.exception( "Error in parsing intents" )
1980 main.log.error( repr( node ) )
1981 intentStates.append( nodeStates )
1982 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1983 main.log.info( dict( out ) )
1984
1985 if intentsResults and not consistentIntents:
Jon Halla440e872016-03-31 15:15:50 -07001986 for i in range( len( main.activeNodes ) ):
1987 node = str( main.activeNodes[i] + 1 )
1988 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001989 main.log.warn( json.dumps(
1990 json.loads( ONOSIntents[ i ] ),
1991 sort_keys=True,
1992 indent=4,
1993 separators=( ',', ': ' ) ) )
1994 elif intentsResults and consistentIntents:
1995 intentCheck = main.TRUE
1996
1997 # NOTE: Store has no durability, so intents are lost across system
1998 # restarts
1999 main.step( "Compare current intents with intents before the failure" )
2000 # NOTE: this requires case 5 to pass for intentState to be set.
2001 # maybe we should stop the test if that fails?
2002 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002003 try:
2004 intentState
2005 except NameError:
2006 main.log.warn( "No previous intent state was saved" )
2007 else:
2008 if intentState and intentState == ONOSIntents[ 0 ]:
2009 sameIntents = main.TRUE
2010 main.log.info( "Intents are consistent with before failure" )
2011 # TODO: possibly the states have changed? we may need to figure out
2012 # what the acceptable states are
2013 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2014 sameIntents = main.TRUE
2015 try:
2016 before = json.loads( intentState )
2017 after = json.loads( ONOSIntents[ 0 ] )
2018 for intent in before:
2019 if intent not in after:
2020 sameIntents = main.FALSE
2021 main.log.debug( "Intent is not currently in ONOS " +
2022 "(at least in the same form):" )
2023 main.log.debug( json.dumps( intent ) )
2024 except ( ValueError, TypeError ):
2025 main.log.exception( "Exception printing intents" )
2026 main.log.debug( repr( ONOSIntents[0] ) )
2027 main.log.debug( repr( intentState ) )
2028 if sameIntents == main.FALSE:
2029 try:
2030 main.log.debug( "ONOS intents before: " )
2031 main.log.debug( json.dumps( json.loads( intentState ),
2032 sort_keys=True, indent=4,
2033 separators=( ',', ': ' ) ) )
2034 main.log.debug( "Current ONOS intents: " )
2035 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2036 sort_keys=True, indent=4,
2037 separators=( ',', ': ' ) ) )
2038 except ( ValueError, TypeError ):
2039 main.log.exception( "Exception printing intents" )
2040 main.log.debug( repr( ONOSIntents[0] ) )
2041 main.log.debug( repr( intentState ) )
2042 utilities.assert_equals(
2043 expect=main.TRUE,
2044 actual=sameIntents,
2045 onpass="Intents are consistent with before failure",
2046 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002047 intentCheck = intentCheck and sameIntents
2048
2049 main.step( "Get the OF Table entries and compare to before " +
2050 "component failure" )
2051 FlowTables = main.TRUE
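        # Compare each switch's current OF table with the snapshot taken in
        # CASE5 (the global 'flows' list); flowTableComp() returns FALSE for
        # any switch whose table differs.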
Jon Hall5cf14d52015-07-16 12:15:19 -07002052 for i in range( 28 ):
2053 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002054 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002055 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2056 FlowTables = FlowTables and curSwitch
2057 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002058 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002059 utilities.assert_equals(
2060 expect=main.TRUE,
2061 actual=FlowTables,
2062 onpass="No changes were found in the flow tables",
2063 onfail="Changes were found in the flow tables" )
2064
2065 main.Mininet2.pingLongKill()
2066 '''
2067 main.step( "Check the continuous pings to ensure that no packets " +
2068 "were dropped during component failure" )
2069 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2070 main.params[ 'TESTONIP' ] )
2071 LossInPings = main.FALSE
2072 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2073 for i in range( 8, 18 ):
2074 main.log.info(
2075 "Checking for a loss in pings along flow from s" +
2076 str( i ) )
2077 LossInPings = main.Mininet2.checkForLoss(
2078 "/tmp/ping.h" +
2079 str( i ) ) or LossInPings
2080 if LossInPings == main.TRUE:
2081 main.log.info( "Loss in ping detected" )
2082 elif LossInPings == main.ERROR:
2083 main.log.info( "There are multiple mininet process running" )
2084 elif LossInPings == main.FALSE:
2085 main.log.info( "No Loss in the pings" )
2086 main.log.info( "No loss of dataplane connectivity" )
2087 utilities.assert_equals(
2088 expect=main.FALSE,
2089 actual=LossInPings,
2090 onpass="No Loss of connectivity",
2091 onfail="Loss of dataplane connectivity detected" )
2092 '''
2093
2094 main.step( "Leadership Election is still functional" )
2095 # Test of LeadershipElection
Jon Halla440e872016-03-31 15:15:50 -07002096 leaderList = []
2097
Jon Hall5cf14d52015-07-16 12:15:19 -07002098 # NOTE: this only works for the sanity test. In case of failures,
2099 # leader will likely change
Jon Halla440e872016-03-31 15:15:50 -07002100 leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
Jon Hall5cf14d52015-07-16 12:15:19 -07002101 leaderResult = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07002102
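        # electionTestLeader() is expected to return the leader's IP address on
        # success, None if no leader is currently elected, or main.FALSE if the
        # CLI response could not be parsed; all three cases are handled below.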
2103 for i in main.activeNodes:
2104 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002105 leaderN = cli.electionTestLeader()
Jon Halla440e872016-03-31 15:15:50 -07002106 leaderList.append( leaderN )
Jon Hall5cf14d52015-07-16 12:15:19 -07002107 # verify leader is ONOS1
2108 if leaderN == leader:
2109 # all is well
2110 # NOTE: In failure scenario, this could be a new node, maybe
2111 # check != ONOS1
2112 pass
2113 elif leaderN == main.FALSE:
2114 # error in response
2115 main.log.error( "Something is wrong with " +
2116 "electionTestLeader function, check the" +
2117 " error logs" )
2118 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002119 elif leaderN is None:
2120 main.log.error( cli.name +
 2121                                " shows that no leader was elected for the" +
 2122                                " election app after the old one died" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002123 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002124 if len( set( leaderList ) ) != 1:
2125 leaderResult = main.FALSE
2126 main.log.error(
2127 "Inconsistent view of leader for the election test app" )
2128 # TODO: print the list
Jon Hall5cf14d52015-07-16 12:15:19 -07002129 utilities.assert_equals(
2130 expect=main.TRUE,
2131 actual=leaderResult,
2132 onpass="Leadership election passed",
2133 onfail="Something went wrong with Leadership election" )
2134
2135 def CASE8( self, main ):
2136 """
2137 Compare topo
2138 """
2139 import json
2140 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002141 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002142 assert main, "main not defined"
2143 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002144 assert main.CLIs, "main.CLIs not defined"
2145 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002146
2147 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002148 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002149 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002150 topoResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002151        topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 elapsed = 0
2153 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002154 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002155 startTime = time.time()
2156 # Give time for Gossip to work
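        # Keep polling until the ONOS view matches Mininet; only give up once
        # at least 3 attempts have been made and at least 60 seconds have
        # elapsed.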
Jon Halle9b1fa32015-12-08 15:32:21 -08002157 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002158 devicesResults = main.TRUE
2159 linksResults = main.TRUE
2160 hostsResults = main.TRUE
2161 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002162 count += 1
2163 cliStart = time.time()
2164 devices = []
2165 threads = []
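            # Each CLI call below is wrapped in utilities.retry so transient
            # failures are retried up to 5 times with ~5 second sleeps; the
            # [ None ] argument appears to be the return value that triggers a
            # retry.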
Jon Halla440e872016-03-31 15:15:50 -07002166 for i in main.activeNodes:
2167 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002168 name="devices-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002169 args=[ main.CLIs[i].devices, [ None ] ],
2170 kwargs= { 'sleep': 5, 'attempts': 5,
2171 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002172 threads.append( t )
2173 t.start()
2174
2175 for t in threads:
2176 t.join()
2177 devices.append( t.result )
2178 hosts = []
2179 ipResult = main.TRUE
2180 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002181 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002182 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002183 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002184 args=[ main.CLIs[i].hosts, [ None ] ],
2185 kwargs= { 'sleep': 5, 'attempts': 5,
2186 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002187 threads.append( t )
2188 t.start()
2189
2190 for t in threads:
2191 t.join()
2192 try:
2193 hosts.append( json.loads( t.result ) )
2194 except ( ValueError, TypeError ):
2195 main.log.exception( "Error parsing hosts results" )
2196 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002197 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002198 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002199 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002200 if hosts[ controller ]:
2201 for host in hosts[ controller ]:
2202 if host is None or host.get( 'ipAddresses', [] ) == []:
2203 main.log.error(
2204 "Error with host ipAddresses on controller" +
2205 controllerStr + ": " + str( host ) )
2206 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002207 ports = []
2208 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002209 for i in main.activeNodes:
2210 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 name="ports-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002212 args=[ main.CLIs[i].ports, [ None ] ],
2213 kwargs= { 'sleep': 5, 'attempts': 5,
2214 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002215 threads.append( t )
2216 t.start()
2217
2218 for t in threads:
2219 t.join()
2220 ports.append( t.result )
2221 links = []
2222 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002223 for i in main.activeNodes:
2224 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002225 name="links-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002226 args=[ main.CLIs[i].links, [ None ] ],
2227 kwargs= { 'sleep': 5, 'attempts': 5,
2228 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002229 threads.append( t )
2230 t.start()
2231
2232 for t in threads:
2233 t.join()
2234 links.append( t.result )
2235 clusters = []
2236 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002237 for i in main.activeNodes:
2238 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002239 name="clusters-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002240 args=[ main.CLIs[i].clusters, [ None ] ],
2241 kwargs= { 'sleep': 5, 'attempts': 5,
2242 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002243 threads.append( t )
2244 t.start()
2245
2246 for t in threads:
2247 t.join()
2248 clusters.append( t.result )
2249
2250 elapsed = time.time() - startTime
2251 cliTime = time.time() - cliStart
2252 print "Elapsed time: " + str( elapsed )
2253 print "CLI time: " + str( cliTime )
2254
Jon Halla440e872016-03-31 15:15:50 -07002255 if all( e is None for e in devices ) and\
2256 all( e is None for e in hosts ) and\
2257 all( e is None for e in ports ) and\
2258 all( e is None for e in links ) and\
2259 all( e is None for e in clusters ):
2260 topoFailMsg = "Could not get topology from ONOS"
2261 main.log.error( topoFailMsg )
2262 continue # Try again, No use trying to compare
2263
Jon Hall5cf14d52015-07-16 12:15:19 -07002264 mnSwitches = main.Mininet1.getSwitches()
2265 mnLinks = main.Mininet1.getLinks()
2266 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07002267 for controller in range( len( main.activeNodes ) ):
2268 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002269 if devices[ controller ] and ports[ controller ] and\
2270 "Error" not in devices[ controller ] and\
2271 "Error" not in ports[ controller ]:
2272
Jon Hallc6793552016-01-19 14:18:37 -08002273 try:
2274 currentDevicesResult = main.Mininet1.compareSwitches(
2275 mnSwitches,
2276 json.loads( devices[ controller ] ),
2277 json.loads( ports[ controller ] ) )
2278 except ( TypeError, ValueError ) as e:
2279 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2280 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002281 else:
2282 currentDevicesResult = main.FALSE
2283 utilities.assert_equals( expect=main.TRUE,
2284 actual=currentDevicesResult,
2285 onpass="ONOS" + controllerStr +
2286 " Switches view is correct",
2287 onfail="ONOS" + controllerStr +
2288 " Switches view is incorrect" )
2289
2290 if links[ controller ] and "Error" not in links[ controller ]:
2291 currentLinksResult = main.Mininet1.compareLinks(
2292 mnSwitches, mnLinks,
2293 json.loads( links[ controller ] ) )
2294 else:
2295 currentLinksResult = main.FALSE
2296 utilities.assert_equals( expect=main.TRUE,
2297 actual=currentLinksResult,
2298 onpass="ONOS" + controllerStr +
2299 " links view is correct",
2300 onfail="ONOS" + controllerStr +
2301 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002302 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002303 currentHostsResult = main.Mininet1.compareHosts(
2304 mnHosts,
2305 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002306 elif hosts[ controller ] == []:
2307 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002308 else:
2309 currentHostsResult = main.FALSE
2310 utilities.assert_equals( expect=main.TRUE,
2311 actual=currentHostsResult,
2312 onpass="ONOS" + controllerStr +
2313 " hosts exist in Mininet",
2314 onfail="ONOS" + controllerStr +
2315 " hosts don't match Mininet" )
2316 # CHECKING HOST ATTACHMENT POINTS
2317 hostAttachment = True
2318 zeroHosts = False
2319 # FIXME: topo-HA/obelisk specific mappings:
2320 # key is mac and value is dpid
2321 mappings = {}
2322 for i in range( 1, 29 ): # hosts 1 through 28
2323 # set up correct variables:
2324 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2325 if i == 1:
2326 deviceId = "1000".zfill(16)
2327 elif i == 2:
2328 deviceId = "2000".zfill(16)
2329 elif i == 3:
2330 deviceId = "3000".zfill(16)
2331 elif i == 4:
2332 deviceId = "3004".zfill(16)
2333 elif i == 5:
2334 deviceId = "5000".zfill(16)
2335 elif i == 6:
2336 deviceId = "6000".zfill(16)
2337 elif i == 7:
2338 deviceId = "6007".zfill(16)
2339 elif i >= 8 and i <= 17:
2340 dpid = '3' + str( i ).zfill( 3 )
2341 deviceId = dpid.zfill(16)
2342 elif i >= 18 and i <= 27:
2343 dpid = '6' + str( i ).zfill( 3 )
2344 deviceId = dpid.zfill(16)
2345 elif i == 28:
2346 deviceId = "2800".zfill(16)
2347 mappings[ macId ] = deviceId
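                # Example of the expected mapping built above: host h8 has MAC
                # 00:00:00:00:00:08 and should attach to device
                # 0000000000003008 ( dpid '3' + '008', zero-filled to 16
                # characters ); h1-h7 and h28 use the hard-coded device ids.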
Jon Halld8f6de82015-12-17 17:04:34 -08002348 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002349 if hosts[ controller ] == []:
2350 main.log.warn( "There are no hosts discovered" )
2351 zeroHosts = True
2352 else:
2353 for host in hosts[ controller ]:
2354 mac = None
2355 location = None
2356 device = None
2357 port = None
2358 try:
2359 mac = host.get( 'mac' )
2360 assert mac, "mac field could not be found for this host object"
2361
2362 location = host.get( 'location' )
2363 assert location, "location field could not be found for this host object"
2364
2365 # Trim the protocol identifier off deviceId
2366 device = str( location.get( 'elementId' ) ).split(':')[1]
2367 assert device, "elementId field could not be found for this host location object"
2368
2369 port = location.get( 'port' )
2370 assert port, "port field could not be found for this host location object"
2371
2372 # Now check if this matches where they should be
2373 if mac and device and port:
2374 if str( port ) != "1":
2375 main.log.error( "The attachment port is incorrect for " +
2376 "host " + str( mac ) +
2377 ". Expected: 1 Actual: " + str( port) )
2378 hostAttachment = False
2379 if device != mappings[ str( mac ) ]:
2380 main.log.error( "The attachment device is incorrect for " +
2381 "host " + str( mac ) +
2382 ". Expected: " + mappings[ str( mac ) ] +
2383 " Actual: " + device )
2384 hostAttachment = False
2385 else:
2386 hostAttachment = False
2387 except AssertionError:
2388 main.log.exception( "Json object not as expected" )
2389 main.log.error( repr( host ) )
2390 hostAttachment = False
2391 else:
2392 main.log.error( "No hosts json output or \"Error\"" +
2393 " in output. hosts = " +
2394 repr( hosts[ controller ] ) )
2395 if zeroHosts is False:
2396 hostAttachment = True
2397
2398 # END CHECKING HOST ATTACHMENT POINTS
2399 devicesResults = devicesResults and currentDevicesResult
2400 linksResults = linksResults and currentLinksResult
2401 hostsResults = hostsResults and currentHostsResult
2402 hostAttachmentResults = hostAttachmentResults and\
2403 hostAttachment
2404 topoResult = ( devicesResults and linksResults
2405 and hostsResults and ipResult and
2406 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002407 utilities.assert_equals( expect=True,
2408 actual=topoResult,
2409 onpass="ONOS topology matches Mininet",
Jon Halla440e872016-03-31 15:15:50 -07002410 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002411 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002412
2413 # Compare json objects for hosts and dataplane clusters
2414
2415 # hosts
2416 main.step( "Hosts view is consistent across all ONOS nodes" )
2417 consistentHostsResult = main.TRUE
2418 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002419 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002420 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002421 if hosts[ controller ] == hosts[ 0 ]:
2422 continue
2423 else: # hosts not consistent
2424 main.log.error( "hosts from ONOS" + controllerStr +
2425                                   " are inconsistent with ONOS1" )
2426 main.log.warn( repr( hosts[ controller ] ) )
2427 consistentHostsResult = main.FALSE
2428
2429 else:
2430 main.log.error( "Error in getting ONOS hosts from ONOS" +
2431 controllerStr )
2432 consistentHostsResult = main.FALSE
2433 main.log.warn( "ONOS" + controllerStr +
2434 " hosts response: " +
2435 repr( hosts[ controller ] ) )
2436 utilities.assert_equals(
2437 expect=main.TRUE,
2438 actual=consistentHostsResult,
2439 onpass="Hosts view is consistent across all ONOS nodes",
2440 onfail="ONOS nodes have different views of hosts" )
2441
2442 main.step( "Hosts information is correct" )
2443 hostsResults = hostsResults and ipResult
2444 utilities.assert_equals(
2445 expect=main.TRUE,
2446 actual=hostsResults,
2447 onpass="Host information is correct",
2448 onfail="Host information is incorrect" )
2449
2450 main.step( "Host attachment points to the network" )
2451 utilities.assert_equals(
2452 expect=True,
2453 actual=hostAttachmentResults,
2454 onpass="Hosts are correctly attached to the network",
2455 onfail="ONOS did not correctly attach hosts to the network" )
2456
2457 # Strongly connected clusters of devices
2458 main.step( "Clusters view is consistent across all ONOS nodes" )
2459 consistentClustersResult = main.TRUE
2460 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07002461 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002462 if "Error" not in clusters[ controller ]:
2463 if clusters[ controller ] == clusters[ 0 ]:
2464 continue
2465 else: # clusters not consistent
2466 main.log.error( "clusters from ONOS" +
2467 controllerStr +
2468                                    " are inconsistent with ONOS1" )
2469 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002470 else:
2471 main.log.error( "Error in getting dataplane clusters " +
2472 "from ONOS" + controllerStr )
2473 consistentClustersResult = main.FALSE
2474 main.log.warn( "ONOS" + controllerStr +
2475 " clusters response: " +
2476 repr( clusters[ controller ] ) )
2477 utilities.assert_equals(
2478 expect=main.TRUE,
2479 actual=consistentClustersResult,
2480 onpass="Clusters view is consistent across all ONOS nodes",
2481 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002482 if not consistentClustersResult:
2483 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07002484
2485 main.step( "There is only one SCC" )
2486 # there should always only be one cluster
2487 try:
2488 numClusters = len( json.loads( clusters[ 0 ] ) )
2489 except ( ValueError, TypeError ):
2490 main.log.exception( "Error parsing clusters[0]: " +
2491 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002492 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002493 clusterResults = main.FALSE
2494 if numClusters == 1:
2495 clusterResults = main.TRUE
2496 utilities.assert_equals(
2497 expect=1,
2498 actual=numClusters,
2499 onpass="ONOS shows 1 SCC",
2500 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2501
2502 topoResult = ( devicesResults and linksResults
2503 and hostsResults and consistentHostsResult
2504 and consistentClustersResult and clusterResults
2505 and ipResult and hostAttachmentResults )
2506
2507 topoResult = topoResult and int( count <= 2 )
2508 note = "note it takes about " + str( int( cliTime ) ) + \
2509 " seconds for the test to make all the cli calls to fetch " +\
2510 "the topology from each ONOS instance"
2511 main.log.info(
2512 "Very crass estimate for topology discovery/convergence( " +
2513 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2514 str( count ) + " tries" )
2515
2516 main.step( "Device information is correct" )
2517 utilities.assert_equals(
2518 expect=main.TRUE,
2519 actual=devicesResults,
2520 onpass="Device information is correct",
2521 onfail="Device information is incorrect" )
2522
2523 main.step( "Links are correct" )
2524 utilities.assert_equals(
2525 expect=main.TRUE,
2526 actual=linksResults,
2527            onpass="Links are correct",
2528 onfail="Links are incorrect" )
2529
2530 main.step( "Hosts are correct" )
2531 utilities.assert_equals(
2532 expect=main.TRUE,
2533 actual=hostsResults,
2534 onpass="Hosts are correct",
2535 onfail="Hosts are incorrect" )
2536
2537 # FIXME: move this to an ONOS state case
2538 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002539 nodeResults = utilities.retry( main.HA.nodesCheck,
2540 False,
2541 args=[main.activeNodes],
2542 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002543
Jon Hall41d39f12016-04-11 22:54:35 -07002544 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002545 onpass="Nodes check successful",
2546 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002547 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002548 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002549 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002550 main.CLIs[i].name,
2551 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002552
Jon Halld2871c22016-07-26 11:01:14 -07002553 if not topoResult:
2554 main.cleanup()
2555 main.exit()
2556
Jon Hall5cf14d52015-07-16 12:15:19 -07002557 def CASE9( self, main ):
2558 """
2559 Link s3-s28 down
2560 """
2561 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002562 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002563 assert main, "main not defined"
2564 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002565 assert main.CLIs, "main.CLIs not defined"
2566 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002567 # NOTE: You should probably run a topology check after this
2568
2569 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2570
2571 description = "Turn off a link to ensure that Link Discovery " +\
2572 "is working properly"
2573 main.case( description )
2574
2575 main.step( "Kill Link between s3 and s28" )
2576 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2577 main.log.info( "Waiting " + str( linkSleep ) +
2578 " seconds for link down to be discovered" )
2579 time.sleep( linkSleep )
2580 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2581 onpass="Link down successful",
2582 onfail="Failed to bring link down" )
2583 # TODO do some sort of check here
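        # One possible check ( sketch only, not executed here ): query the link
        # store on an active node and confirm the expected link count, e.g.
        #     onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        #     onosLinks = json.loads( onosCli.links() )
        #     main.log.info( "ONOS reports " + str( len( onosLinks ) ) + " links" )
        # CASE8 performs the full topology comparison, so this is optional.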
2584
2585 def CASE10( self, main ):
2586 """
2587 Link s3-s28 up
2588 """
2589 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002590 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002591 assert main, "main not defined"
2592 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002593 assert main.CLIs, "main.CLIs not defined"
2594 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002595 # NOTE: You should probably run a topology check after this
2596
2597 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2598
2599 description = "Restore a link to ensure that Link Discovery is " + \
2600 "working properly"
2601 main.case( description )
2602
2603 main.step( "Bring link between s3 and s28 back up" )
2604 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2605 main.log.info( "Waiting " + str( linkSleep ) +
2606 " seconds for link up to be discovered" )
2607 time.sleep( linkSleep )
2608 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2609 onpass="Link up successful",
2610 onfail="Failed to bring link up" )
2611 # TODO do some sort of check here
2612
2613 def CASE11( self, main ):
2614 """
2615 Switch Down
2616 """
2617 # NOTE: You should probably run a topology check after this
2618 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002619 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002620 assert main, "main not defined"
2621 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002622 assert main.CLIs, "main.CLIs not defined"
2623 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002624
2625 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2626
2627 description = "Killing a switch to ensure it is discovered correctly"
Jon Halla440e872016-03-31 15:15:50 -07002628 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002629 main.case( description )
2630 switch = main.params[ 'kill' ][ 'switch' ]
2631 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2632
2633 # TODO: Make this switch parameterizable
2634 main.step( "Kill " + switch )
2635 main.log.info( "Deleting " + switch )
2636 main.Mininet1.delSwitch( switch )
2637 main.log.info( "Waiting " + str( switchSleep ) +
2638 " seconds for switch down to be discovered" )
2639 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002640 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002641 # Peek at the deleted switch
2642 main.log.warn( str( device ) )
2643 result = main.FALSE
2644 if device and device[ 'available' ] is False:
2645 result = main.TRUE
2646 utilities.assert_equals( expect=main.TRUE, actual=result,
2647 onpass="Kill switch successful",
2648 onfail="Failed to kill switch?" )
2649
2650 def CASE12( self, main ):
2651 """
2652 Switch Up
2653 """
2654 # NOTE: You should probably run a topology check after this
2655 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002656 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002657 assert main, "main not defined"
2658 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002659 assert main.CLIs, "main.CLIs not defined"
2660 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002661 assert ONOS1Port, "ONOS1Port not defined"
2662 assert ONOS2Port, "ONOS2Port not defined"
2663 assert ONOS3Port, "ONOS3Port not defined"
2664 assert ONOS4Port, "ONOS4Port not defined"
2665 assert ONOS5Port, "ONOS5Port not defined"
2666 assert ONOS6Port, "ONOS6Port not defined"
2667 assert ONOS7Port, "ONOS7Port not defined"
2668
2669 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2670 switch = main.params[ 'kill' ][ 'switch' ]
2671 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2672 links = main.params[ 'kill' ][ 'links' ].split()
Jon Halla440e872016-03-31 15:15:50 -07002673 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002674 description = "Adding a switch to ensure it is discovered correctly"
2675 main.case( description )
2676
2677 main.step( "Add back " + switch )
2678 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2679 for peer in links:
2680 main.Mininet1.addLink( switch, peer )
Jon Halla440e872016-03-31 15:15:50 -07002681 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002682 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2683 main.log.info( "Waiting " + str( switchSleep ) +
2684 " seconds for switch up to be discovered" )
2685 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002686 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002687 # Peek at the deleted switch
2688 main.log.warn( str( device ) )
2689 result = main.FALSE
2690 if device and device[ 'available' ]:
2691 result = main.TRUE
2692 utilities.assert_equals( expect=main.TRUE, actual=result,
2693 onpass="add switch successful",
2694 onfail="Failed to add switch?" )
2695
2696 def CASE13( self, main ):
2697 """
2698 Clean up
2699 """
2700 import os
2701 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002702 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002703 assert main, "main not defined"
2704 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002705 assert main.CLIs, "main.CLIs not defined"
2706 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002707
2708 # printing colors to terminal
2709 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2710 'blue': '\033[94m', 'green': '\033[92m',
2711 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2712 main.case( "Test Cleanup" )
2713 main.step( "Killing tcpdumps" )
2714 main.Mininet2.stopTcpdump()
2715
2716 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002717 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002718 main.step( "Copying MN pcap and ONOS log files to test station" )
2719 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2720 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002721 # NOTE: MN Pcap file is being saved to logdir.
2722 # We scp this file as MN and TestON aren't necessarily the same vm
2723
2724 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002725 # TODO: Load these from params
2726 # NOTE: must end in /
2727 logFolder = "/opt/onos/log/"
2728 logFiles = [ "karaf.log", "karaf.log.1" ]
2729 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002730 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002731 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002732 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002733 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2734 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002735 # std*.log's
2736 # NOTE: must end in /
2737 logFolder = "/opt/onos/var/"
2738 logFiles = [ "stderr.log", "stdout.log" ]
2739 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002740 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002741 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002742 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002743 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2744 logFolder + f, dstName )
2745 else:
2746 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002747
2748 main.step( "Stopping Mininet" )
2749 mnResult = main.Mininet1.stopNet()
2750 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2751 onpass="Mininet stopped",
2752 onfail="MN cleanup NOT successful" )
2753
2754 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002755 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002756 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2757 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002758
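        # Record the intent gossip convergence time for the Jenkins CSV plots.
        # gossipTime is assumed to have been set in the earlier intents case;
        # the NameError handler below covers runs where that case was skipped.
        # The resulting Timers.csv is two lines, e.g. (illustrative):
        #     Gossip Intents
        #     5.2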
2759 try:
2760 timerLog = open( main.logdir + "/Timers.csv", 'w')
2761            # Overwrite the file with the timer labels and values, then close
2762 labels = "Gossip Intents"
2763 data = str( gossipTime )
2764 timerLog.write( labels + "\n" + data )
2765 timerLog.close()
2766 except NameError, e:
2767 main.log.exception(e)
2768
2769 def CASE14( self, main ):
2770 """
2771 start election app on all onos nodes
2772 """
Jon Halle1a3b752015-07-22 13:02:46 -07002773 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002774 assert main, "main not defined"
2775 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002776 assert main.CLIs, "main.CLIs not defined"
2777 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002778
2779 main.case("Start Leadership Election app")
2780 main.step( "Install leadership election app" )
Jon Halla440e872016-03-31 15:15:50 -07002781 onosCli = main.CLIs[ main.activeNodes[0] ]
2782 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002783 utilities.assert_equals(
2784 expect=main.TRUE,
2785 actual=appResult,
2786 onpass="Election app installed",
2787 onfail="Something went wrong with installing Leadership election" )
2788
2789 main.step( "Run for election on each node" )
Jon Halla440e872016-03-31 15:15:50 -07002790 for i in main.activeNodes:
2791 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002792 time.sleep(5)
2793 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2794 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
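        # consistentLeaderboards is assumed to return ( allSame, boards ), where
        # boards[ 0 ] is the election-topic leaderboard with the current leader
        # first, followed by the remaining candidates in queue order. That is
        # why leaders[ 0 ][ 0 ] below is compared against the first node's IP.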
Jon Hall5cf14d52015-07-16 12:15:19 -07002795 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002796 expect=True,
2797 actual=sameResult,
2798 onpass="All nodes see the same leaderboards",
2799 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002800
        correctLeader = False  # default so the check below is defined even if the boards differ
Jon Hall25463a82016-04-13 14:03:52 -07002801        if sameResult:
2802 leader = leaders[ 0 ][ 0 ]
2803 if main.nodes[main.activeNodes[0]].ip_address in leader:
2804 correctLeader = True
2805 else:
2806 correctLeader = False
2807 main.step( "First node was elected leader" )
2808 utilities.assert_equals(
2809 expect=True,
2810 actual=correctLeader,
2811 onpass="Correct leader was elected",
2812 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002813
2814 def CASE15( self, main ):
2815 """
2816 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002817 15.1 Run election on each node
2818 15.2 Check that each node has the same leaders and candidates
2819 15.3 Find current leader and withdraw
2820 15.4 Check that a new node was elected leader
2821        15.5 Check that the new leader was the candidate of the old leader
2822 15.6 Run for election on old leader
2823 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2824 15.8 Make sure that the old leader was added to the candidate list
2825
2826        old and new variable prefixes refer to data from before vs. after
2827        withdrawal, and later to before withdrawal vs. after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002828 """
2829 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002830 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002831 assert main, "main not defined"
2832 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002833 assert main.CLIs, "main.CLIs not defined"
2834 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002835
Jon Halla440e872016-03-31 15:15:50 -07002836 description = "Check that Leadership Election is still functional"
Jon Hall5cf14d52015-07-16 12:15:19 -07002837 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002838        # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002839
Jon Halla440e872016-03-31 15:15:50 -07002840        oldLeaders = []  # list of lists of each node's candidates before
2841        newLeaders = []  # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002842        oldLeader = ''  # the old leader from oldLeaders, None if not same
2843        newLeader = ''  # the new leader from newLeaders, None if not same
2844 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2845 expectNoLeader = False # True when there is only one leader
2846 if main.numCtrls == 1:
2847 expectNoLeader = True
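            # With a single controller there is no other candidate, so after
            # the leader withdraws no new leader should be elected.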
2848
2849 main.step( "Run for election on each node" )
2850 electionResult = main.TRUE
2851
Jon Halla440e872016-03-31 15:15:50 -07002852 for i in main.activeNodes: # run test election on each node
2853 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002854 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002855 utilities.assert_equals(
2856 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002857 actual=electionResult,
2858 onpass="All nodes successfully ran for leadership",
2859 onfail="At least one node failed to run for leadership" )
2860
acsmars3a72bde2015-09-02 14:16:22 -07002861 if electionResult == main.FALSE:
2862 main.log.error(
Jon Halla440e872016-03-31 15:15:50 -07002863 "Skipping Test Case because Election Test App isn't loaded" )
acsmars3a72bde2015-09-02 14:16:22 -07002864 main.skipCase()
2865
acsmars71adceb2015-08-31 15:09:26 -07002866 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002867 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07002868 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07002869 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07002870 if sameResult:
2871 oldLeader = oldLeaders[ 0 ][ 0 ]
2872 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002873 else:
Jon Halla440e872016-03-31 15:15:50 -07002874 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002875 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002876 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002877 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002878 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002879 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002880
2881 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002882 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002883 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002884 if oldLeader is None:
2885 main.log.error( "Leadership isn't consistent." )
2886 withdrawResult = main.FALSE
2887 # Get the CLI of the oldLeader
Jon Halla440e872016-03-31 15:15:50 -07002888 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002889 if oldLeader == main.nodes[ i ].ip_address:
2890 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002891 break
2892 else: # FOR/ELSE statement
2893 main.log.error( "Leader election, could not find current leader" )
2894 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002895 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002896 utilities.assert_equals(
2897 expect=main.TRUE,
2898 actual=withdrawResult,
2899 onpass="Node was withdrawn from election",
2900 onfail="Node was not withdrawn from election" )
2901
acsmars71adceb2015-08-31 15:09:26 -07002902 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07002903 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07002904 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07002905 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07002906 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07002907 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07002908 if newLeaders[ 0 ][ 0 ] == 'none':
2909 main.log.error( "No leader was elected on at least 1 node" )
2910 if not expectNoLeader:
2911 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07002912 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07002913
2914        # Check that the new leader is not the old leader, which was withdrawn
2915 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07002916 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08002917 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07002918 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002919 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002920 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002921 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002922 onpass="Leadership election passed",
2923 onfail="Something went wrong with Leadership election" )
2924
Jon Halla440e872016-03-31 15:15:50 -07002925        main.step( "Check that the new leader was the candidate of the old leader" )
2926        # oldLeaders[ 0 ][ 2 ] should become the new leader after the withdrawal
acsmars71adceb2015-08-31 15:09:26 -07002927 correctCandidateResult = main.TRUE
2928 if expectNoLeader:
2929 if newLeader == 'none':
2930 main.log.info( "No leader expected. None found. Pass" )
2931 correctCandidateResult = main.TRUE
2932 else:
2933 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2934 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002935 elif len( oldLeaders[0] ) >= 3:
2936 if newLeader == oldLeaders[ 0 ][ 2 ]:
2937 # correct leader was elected
2938 correctCandidateResult = main.TRUE
2939 else:
2940 correctCandidateResult = main.FALSE
2941 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
2942 newLeader, oldLeaders[ 0 ][ 2 ] ) )
2943 else:
2944 main.log.warn( "Could not determine who should be the correct leader" )
2945 main.log.debug( oldLeaders[ 0 ] )
acsmars71adceb2015-08-31 15:09:26 -07002946 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07002947 utilities.assert_equals(
2948 expect=main.TRUE,
2949 actual=correctCandidateResult,
2950 onpass="Correct Candidate Elected",
2951 onfail="Incorrect Candidate Elected" )
2952
Jon Hall5cf14d52015-07-16 12:15:19 -07002953        main.step( "Run for election on old leader (just so everyone " +
2954                   "is in the hat)" )
acsmars71adceb2015-08-31 15:09:26 -07002955 if oldLeaderCLI is not None:
2956 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002957 else:
acsmars71adceb2015-08-31 15:09:26 -07002958 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002959 runResult = main.FALSE
2960 utilities.assert_equals(
2961 expect=main.TRUE,
2962 actual=runResult,
2963 onpass="App re-ran for election",
2964 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07002965
acsmars71adceb2015-08-31 15:09:26 -07002966 main.step(
2967 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002968 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07002969 # Get new leaders and candidates
2970 reRunLeaders = []
2971        time.sleep( 5 )  # Parameterize
Jon Hall41d39f12016-04-11 22:54:35 -07002972 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07002973
2974 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07002975 if not reRunLeaders[0]:
2976 positionResult = main.FALSE
2977 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07002978 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
2979 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07002980 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002981 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002982 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002983 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002984 onpass="Old leader successfully re-ran for election",
2985 onfail="Something went wrong with Leadership election after " +
2986 "the old leader re-ran for election" )
2987
2988 def CASE16( self, main ):
2989 """
2990 Install Distributed Primitives app
2991 """
2992 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002993 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002994 assert main, "main not defined"
2995 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002996 assert main.CLIs, "main.CLIs not defined"
2997 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002998
2999 # Variables for the distributed primitives tests
3000 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003001 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003002 global onosSet
3003 global onosSetName
3004 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003005 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003006 onosSet = set([])
3007 onosSetName = "TestON-set"
3008
3009 description = "Install Primitives app"
3010 main.case( description )
3011 main.step( "Install Primitives app" )
3012 appName = "org.onosproject.distributedprimitives"
Jon Halla440e872016-03-31 15:15:50 -07003013 node = main.activeNodes[0]
3014 appResults = main.CLIs[node].activateApp( appName )
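        # App activation is cluster-wide state, so activating on a single
        # active node is assumed to be enough; the sleep below gives the
        # remaining nodes time to see the app as ACTIVE.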
Jon Hall5cf14d52015-07-16 12:15:19 -07003015 utilities.assert_equals( expect=main.TRUE,
3016 actual=appResults,
3017 onpass="Primitives app activated",
3018 onfail="Primitives app not activated" )
3019 time.sleep( 5 ) # To allow all nodes to activate
3020
3021 def CASE17( self, main ):
3022 """
3023 Check for basic functionality with distributed primitives
3024 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003025 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003026 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003027 assert main, "main not defined"
3028 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003029 assert main.CLIs, "main.CLIs not defined"
3030 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003031 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003032 assert onosSetName, "onosSetName not defined"
3033 # NOTE: assert fails if value is 0/None/Empty/False
3034 try:
3035 pCounterValue
3036 except NameError:
3037 main.log.error( "pCounterValue not defined, setting to 0" )
3038 pCounterValue = 0
3039 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003040 onosSet
3041 except NameError:
3042 main.log.error( "onosSet not defined, setting to empty Set" )
3043 onosSet = set([])
3044 # Variables for the distributed primitives tests. These are local only
3045 addValue = "a"
3046 addAllValue = "a b c d e f"
3047 retainValue = "c d e f"
3048
3049 description = "Check for basic functionality with distributed " +\
3050 "primitives"
3051 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003052 main.caseExplanation = "Test the methods of the distributed " +\
3053            "primitives (counters and sets) through the CLI"
Jon Hall5cf14d52015-07-16 12:15:19 -07003054 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003055 # Partitioned counters
3056 main.step( "Increment then get a default counter on each node" )
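        # Bookkeeping convention for the counter steps below (assumed CLI
        # semantics): counterTestAddAndGet returns the value *after* the
        # increment, so the expected value is recorded after bumping
        # pCounterValue; counterTestGetAndAdd returns the value *before*
        # adding, so the expected value is recorded first. Illustrative:
        # with the counter at 7, addAndGet( delta=1 ) returns 8, while
        # getAndAdd( delta=1 ) returns 7 ( and the counter becomes 8 ).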
Jon Hall5cf14d52015-07-16 12:15:19 -07003057 pCounters = []
3058 threads = []
3059 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003060 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003061 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3062 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003063 args=[ pCounterName ] )
3064 pCounterValue += 1
3065 addedPValues.append( pCounterValue )
3066 threads.append( t )
3067 t.start()
3068
3069 for t in threads:
3070 t.join()
3071 pCounters.append( t.result )
3072 # Check that counter incremented numController times
3073 pCounterResults = True
3074 for i in addedPValues:
3075 tmpResult = i in pCounters
3076 pCounterResults = pCounterResults and tmpResult
3077 if not tmpResult:
3078 main.log.error( str( i ) + " is not in partitioned "
3079 "counter incremented results" )
3080 utilities.assert_equals( expect=True,
3081 actual=pCounterResults,
3082 onpass="Default counter incremented",
3083 onfail="Error incrementing default" +
3084 " counter" )
3085
Jon Halle1a3b752015-07-22 13:02:46 -07003086 main.step( "Get then Increment a default counter on each node" )
3087 pCounters = []
3088 threads = []
3089 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003090 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003091 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3092 name="counterGetAndAdd-" + str( i ),
3093 args=[ pCounterName ] )
3094 addedPValues.append( pCounterValue )
3095 pCounterValue += 1
3096 threads.append( t )
3097 t.start()
3098
3099 for t in threads:
3100 t.join()
3101 pCounters.append( t.result )
3102 # Check that counter incremented numController times
3103 pCounterResults = True
3104 for i in addedPValues:
3105 tmpResult = i in pCounters
3106 pCounterResults = pCounterResults and tmpResult
3107 if not tmpResult:
3108 main.log.error( str( i ) + " is not in partitioned "
3109 "counter incremented results" )
3110 utilities.assert_equals( expect=True,
3111 actual=pCounterResults,
3112 onpass="Default counter incremented",
3113 onfail="Error incrementing default" +
3114 " counter" )
3115
3116 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003117 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
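        # counterCheck is assumed to read the counter from each active node
        # and compare it against the locally tracked pCounterValue.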
Jon Halle1a3b752015-07-22 13:02:46 -07003118 utilities.assert_equals( expect=main.TRUE,
3119 actual=incrementCheck,
3120 onpass="Added counters are correct",
3121 onfail="Added counters are incorrect" )
3122
3123 main.step( "Add -8 to then get a default counter on each node" )
3124 pCounters = []
3125 threads = []
3126 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003127 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003128 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3129 name="counterIncrement-" + str( i ),
3130 args=[ pCounterName ],
3131 kwargs={ "delta": -8 } )
3132 pCounterValue += -8
3133 addedPValues.append( pCounterValue )
3134 threads.append( t )
3135 t.start()
3136
3137 for t in threads:
3138 t.join()
3139 pCounters.append( t.result )
3140 # Check that counter incremented numController times
3141 pCounterResults = True
3142 for i in addedPValues:
3143 tmpResult = i in pCounters
3144 pCounterResults = pCounterResults and tmpResult
3145 if not tmpResult:
3146 main.log.error( str( i ) + " is not in partitioned "
3147 "counter incremented results" )
3148 utilities.assert_equals( expect=True,
3149 actual=pCounterResults,
3150 onpass="Default counter incremented",
3151 onfail="Error incrementing default" +
3152 " counter" )
3153
3154 main.step( "Add 5 to then get a default counter on each node" )
3155 pCounters = []
3156 threads = []
3157 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003158 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003159 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3160 name="counterIncrement-" + str( i ),
3161 args=[ pCounterName ],
3162 kwargs={ "delta": 5 } )
3163 pCounterValue += 5
3164 addedPValues.append( pCounterValue )
3165 threads.append( t )
3166 t.start()
3167
3168 for t in threads:
3169 t.join()
3170 pCounters.append( t.result )
3171 # Check that counter incremented numController times
3172 pCounterResults = True
3173 for i in addedPValues:
3174 tmpResult = i in pCounters
3175 pCounterResults = pCounterResults and tmpResult
3176 if not tmpResult:
3177 main.log.error( str( i ) + " is not in partitioned "
3178 "counter incremented results" )
3179 utilities.assert_equals( expect=True,
3180 actual=pCounterResults,
3181 onpass="Default counter incremented",
3182 onfail="Error incrementing default" +
3183 " counter" )
3184
3185 main.step( "Get then add 5 to a default counter on each node" )
3186 pCounters = []
3187 threads = []
3188 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003189 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003190 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3191 name="counterIncrement-" + str( i ),
3192 args=[ pCounterName ],
3193 kwargs={ "delta": 5 } )
3194 addedPValues.append( pCounterValue )
3195 pCounterValue += 5
3196 threads.append( t )
3197 t.start()
3198
3199 for t in threads:
3200 t.join()
3201 pCounters.append( t.result )
3202 # Check that counter incremented numController times
3203 pCounterResults = True
3204 for i in addedPValues:
3205 tmpResult = i in pCounters
3206 pCounterResults = pCounterResults and tmpResult
3207 if not tmpResult:
3208 main.log.error( str( i ) + " is not in partitioned "
3209 "counter incremented results" )
3210 utilities.assert_equals( expect=True,
3211 actual=pCounterResults,
3212 onpass="Default counter incremented",
3213 onfail="Error incrementing default" +
3214 " counter" )
3215
3216 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003217 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003218 utilities.assert_equals( expect=main.TRUE,
3219 actual=incrementCheck,
3220 onpass="Added counters are correct",
3221 onfail="Added counters are incorrect" )
3222
Jon Hall5cf14d52015-07-16 12:15:19 -07003223 # DISTRIBUTED SETS
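        # Each set step below follows the same pattern: mutate the local
        # Python set 'onosSet', run the equivalent operation on every active
        # node in parallel threads, then verify that every node reports the
        # same contents and size as the local copy. setTestGet is assumed to
        # return the set contents as a list, and the mutating calls to return
        # main.TRUE on change, main.FALSE on no-op and main.ERROR on failure.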
3224 main.step( "Distributed Set get" )
3225 size = len( onosSet )
3226 getResponses = []
3227 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003228 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003229 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003230 name="setTestGet-" + str( i ),
3231 args=[ onosSetName ] )
3232 threads.append( t )
3233 t.start()
3234 for t in threads:
3235 t.join()
3236 getResponses.append( t.result )
3237
3238 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003239 for i in range( len( main.activeNodes ) ):
3240 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003241 if isinstance( getResponses[ i ], list):
3242 current = set( getResponses[ i ] )
3243 if len( current ) == len( getResponses[ i ] ):
3244 # no repeats
3245 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003246 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003247 " has incorrect view" +
3248 " of set " + onosSetName + ":\n" +
3249 str( getResponses[ i ] ) )
3250 main.log.debug( "Expected: " + str( onosSet ) )
3251 main.log.debug( "Actual: " + str( current ) )
3252 getResults = main.FALSE
3253 else:
3254 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003255 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003256 " has repeat elements in" +
3257 " set " + onosSetName + ":\n" +
3258 str( getResponses[ i ] ) )
3259 getResults = main.FALSE
3260 elif getResponses[ i ] == main.ERROR:
3261 getResults = main.FALSE
3262 utilities.assert_equals( expect=main.TRUE,
3263 actual=getResults,
3264 onpass="Set elements are correct",
3265 onfail="Set elements are incorrect" )
3266
3267 main.step( "Distributed Set size" )
3268 sizeResponses = []
3269 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003270 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003271 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003272 name="setTestSize-" + str( i ),
3273 args=[ onosSetName ] )
3274 threads.append( t )
3275 t.start()
3276 for t in threads:
3277 t.join()
3278 sizeResponses.append( t.result )
3279
3280 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003281 for i in range( len( main.activeNodes ) ):
3282 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003283 if size != sizeResponses[ i ]:
3284 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003285 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003286 " expected a size of " + str( size ) +
3287 " for set " + onosSetName +
3288 " but got " + str( sizeResponses[ i ] ) )
3289 utilities.assert_equals( expect=main.TRUE,
3290 actual=sizeResults,
3291 onpass="Set sizes are correct",
3292 onfail="Set sizes are incorrect" )
3293
3294 main.step( "Distributed Set add()" )
3295 onosSet.add( addValue )
3296 addResponses = []
3297 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003298 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003299 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003300 name="setTestAdd-" + str( i ),
3301 args=[ onosSetName, addValue ] )
3302 threads.append( t )
3303 t.start()
3304 for t in threads:
3305 t.join()
3306 addResponses.append( t.result )
3307
3308 # main.TRUE = successfully changed the set
3309 # main.FALSE = action resulted in no change in set
3310 # main.ERROR - Some error in executing the function
3311 addResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003312 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003313 if addResponses[ i ] == main.TRUE:
3314 # All is well
3315 pass
3316 elif addResponses[ i ] == main.FALSE:
3317 # Already in set, probably fine
3318 pass
3319 elif addResponses[ i ] == main.ERROR:
3320 # Error in execution
3321 addResults = main.FALSE
3322 else:
3323 # unexpected result
3324 addResults = main.FALSE
3325 if addResults != main.TRUE:
3326 main.log.error( "Error executing set add" )
3327
3328 # Check if set is still correct
3329 size = len( onosSet )
3330 getResponses = []
3331 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003332 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003333 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003334 name="setTestGet-" + str( i ),
3335 args=[ onosSetName ] )
3336 threads.append( t )
3337 t.start()
3338 for t in threads:
3339 t.join()
3340 getResponses.append( t.result )
3341 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003342 for i in range( len( main.activeNodes ) ):
3343 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003344 if isinstance( getResponses[ i ], list):
3345 current = set( getResponses[ i ] )
3346 if len( current ) == len( getResponses[ i ] ):
3347 # no repeats
3348 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003349 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003350 " of set " + onosSetName + ":\n" +
3351 str( getResponses[ i ] ) )
3352 main.log.debug( "Expected: " + str( onosSet ) )
3353 main.log.debug( "Actual: " + str( current ) )
3354 getResults = main.FALSE
3355 else:
3356 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003357 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003358 " set " + onosSetName + ":\n" +
3359 str( getResponses[ i ] ) )
3360 getResults = main.FALSE
3361 elif getResponses[ i ] == main.ERROR:
3362 getResults = main.FALSE
3363 sizeResponses = []
3364 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003365 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003366 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003367 name="setTestSize-" + str( i ),
3368 args=[ onosSetName ] )
3369 threads.append( t )
3370 t.start()
3371 for t in threads:
3372 t.join()
3373 sizeResponses.append( t.result )
3374 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003375 for i in range( len( main.activeNodes ) ):
3376 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003377 if size != sizeResponses[ i ]:
3378 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003379 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003380 " expected a size of " + str( size ) +
3381 " for set " + onosSetName +
3382 " but got " + str( sizeResponses[ i ] ) )
3383 addResults = addResults and getResults and sizeResults
3384 utilities.assert_equals( expect=main.TRUE,
3385 actual=addResults,
3386 onpass="Set add correct",
3387 onfail="Set add was incorrect" )
3388
3389 main.step( "Distributed Set addAll()" )
3390 onosSet.update( addAllValue.split() )
3391 addResponses = []
3392 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003393 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003394 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003395 name="setTestAddAll-" + str( i ),
3396 args=[ onosSetName, addAllValue ] )
3397 threads.append( t )
3398 t.start()
3399 for t in threads:
3400 t.join()
3401 addResponses.append( t.result )
3402
3403 # main.TRUE = successfully changed the set
3404 # main.FALSE = action resulted in no change in set
3405 # main.ERROR - Some error in executing the function
3406 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003407 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003408 if addResponses[ i ] == main.TRUE:
3409 # All is well
3410 pass
3411 elif addResponses[ i ] == main.FALSE:
3412 # Already in set, probably fine
3413 pass
3414 elif addResponses[ i ] == main.ERROR:
3415 # Error in execution
3416 addAllResults = main.FALSE
3417 else:
3418 # unexpected result
3419 addAllResults = main.FALSE
3420 if addAllResults != main.TRUE:
3421 main.log.error( "Error executing set addAll" )
3422
3423 # Check if set is still correct
3424 size = len( onosSet )
3425 getResponses = []
3426 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003427 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003428 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003429 name="setTestGet-" + str( i ),
3430 args=[ onosSetName ] )
3431 threads.append( t )
3432 t.start()
3433 for t in threads:
3434 t.join()
3435 getResponses.append( t.result )
3436 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003437 for i in range( len( main.activeNodes ) ):
3438 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003439 if isinstance( getResponses[ i ], list):
3440 current = set( getResponses[ i ] )
3441 if len( current ) == len( getResponses[ i ] ):
3442 # no repeats
3443 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003444 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003445 " has incorrect view" +
3446 " of set " + onosSetName + ":\n" +
3447 str( getResponses[ i ] ) )
3448 main.log.debug( "Expected: " + str( onosSet ) )
3449 main.log.debug( "Actual: " + str( current ) )
3450 getResults = main.FALSE
3451 else:
3452 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003453 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003454 " has repeat elements in" +
3455 " set " + onosSetName + ":\n" +
3456 str( getResponses[ i ] ) )
3457 getResults = main.FALSE
3458 elif getResponses[ i ] == main.ERROR:
3459 getResults = main.FALSE
3460 sizeResponses = []
3461 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003462 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003463 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003464 name="setTestSize-" + str( i ),
3465 args=[ onosSetName ] )
3466 threads.append( t )
3467 t.start()
3468 for t in threads:
3469 t.join()
3470 sizeResponses.append( t.result )
3471 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003472 for i in range( len( main.activeNodes ) ):
3473 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003474 if size != sizeResponses[ i ]:
3475 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003476 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003477 " expected a size of " + str( size ) +
3478 " for set " + onosSetName +
3479 " but got " + str( sizeResponses[ i ] ) )
3480 addAllResults = addAllResults and getResults and sizeResults
3481 utilities.assert_equals( expect=main.TRUE,
3482 actual=addAllResults,
3483 onpass="Set addAll correct",
3484 onfail="Set addAll was incorrect" )
3485
3486 main.step( "Distributed Set contains()" )
3487 containsResponses = []
3488 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003489 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003490 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003491 name="setContains-" + str( i ),
3492 args=[ onosSetName ],
3493 kwargs={ "values": addValue } )
3494 threads.append( t )
3495 t.start()
3496 for t in threads:
3497 t.join()
3498            # NOTE: t.result is a tuple; element [ 1 ] is the boolean contains result
3499 containsResponses.append( t.result )
3500
3501 containsResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003502 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003503 if containsResponses[ i ] == main.ERROR:
3504 containsResults = main.FALSE
3505 else:
3506 containsResults = containsResults and\
3507 containsResponses[ i ][ 1 ]
3508 utilities.assert_equals( expect=main.TRUE,
3509 actual=containsResults,
3510 onpass="Set contains is functional",
3511 onfail="Set contains failed" )
3512
3513 main.step( "Distributed Set containsAll()" )
3514 containsAllResponses = []
3515 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003516 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003517 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003518 name="setContainsAll-" + str( i ),
3519 args=[ onosSetName ],
3520 kwargs={ "values": addAllValue } )
3521 threads.append( t )
3522 t.start()
3523 for t in threads:
3524 t.join()
3525            # NOTE: t.result is a tuple; element [ 1 ] is the boolean containsAll result
3526 containsAllResponses.append( t.result )
3527
3528 containsAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003529 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003530            if containsAllResponses[ i ] == main.ERROR:
3531                containsAllResults = main.FALSE
3532            else:
3533                containsAllResults = containsAllResults and\
3534                                     containsAllResponses[ i ][ 1 ]
3535 utilities.assert_equals( expect=main.TRUE,
3536 actual=containsAllResults,
3537 onpass="Set containsAll is functional",
3538 onfail="Set containsAll failed" )
3539
3540 main.step( "Distributed Set remove()" )
3541 onosSet.remove( addValue )
3542 removeResponses = []
3543 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003544 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003545 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003546 name="setTestRemove-" + str( i ),
3547 args=[ onosSetName, addValue ] )
3548 threads.append( t )
3549 t.start()
3550 for t in threads:
3551 t.join()
3552 removeResponses.append( t.result )
3553
3554 # main.TRUE = successfully changed the set
3555 # main.FALSE = action resulted in no change in set
3556 # main.ERROR - Some error in executing the function
3557 removeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003558 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003559 if removeResponses[ i ] == main.TRUE:
3560 # All is well
3561 pass
3562 elif removeResponses[ i ] == main.FALSE:
3563 # not in set, probably fine
3564 pass
3565 elif removeResponses[ i ] == main.ERROR:
3566 # Error in execution
3567 removeResults = main.FALSE
3568 else:
3569 # unexpected result
3570 removeResults = main.FALSE
3571 if removeResults != main.TRUE:
3572 main.log.error( "Error executing set remove" )
3573
3574 # Check if set is still correct
3575 size = len( onosSet )
3576 getResponses = []
3577 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003578 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003579 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003580 name="setTestGet-" + str( i ),
3581 args=[ onosSetName ] )
3582 threads.append( t )
3583 t.start()
3584 for t in threads:
3585 t.join()
3586 getResponses.append( t.result )
3587 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003588 for i in range( len( main.activeNodes ) ):
3589 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003590 if isinstance( getResponses[ i ], list):
3591 current = set( getResponses[ i ] )
3592 if len( current ) == len( getResponses[ i ] ):
3593 # no repeats
3594 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003595 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003596 " has incorrect view" +
3597 " of set " + onosSetName + ":\n" +
3598 str( getResponses[ i ] ) )
3599 main.log.debug( "Expected: " + str( onosSet ) )
3600 main.log.debug( "Actual: " + str( current ) )
3601 getResults = main.FALSE
3602 else:
3603 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003604 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003605 " has repeat elements in" +
3606 " set " + onosSetName + ":\n" +
3607 str( getResponses[ i ] ) )
3608 getResults = main.FALSE
3609 elif getResponses[ i ] == main.ERROR:
3610 getResults = main.FALSE
3611 sizeResponses = []
3612 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003613 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003614 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003615 name="setTestSize-" + str( i ),
3616 args=[ onosSetName ] )
3617 threads.append( t )
3618 t.start()
3619 for t in threads:
3620 t.join()
3621 sizeResponses.append( t.result )
3622 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003623 for i in range( len( main.activeNodes ) ):
3624 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003625 if size != sizeResponses[ i ]:
3626 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003627 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003628 " expected a size of " + str( size ) +
3629 " for set " + onosSetName +
3630 " but got " + str( sizeResponses[ i ] ) )
3631 removeResults = removeResults and getResults and sizeResults
3632 utilities.assert_equals( expect=main.TRUE,
3633 actual=removeResults,
3634 onpass="Set remove correct",
3635 onfail="Set remove was incorrect" )
3636
3637 main.step( "Distributed Set removeAll()" )
3638 onosSet.difference_update( addAllValue.split() )
3639 removeAllResponses = []
3640 threads = []
3641 try:
Jon Halla440e872016-03-31 15:15:50 -07003642 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003643 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003644 name="setTestRemoveAll-" + str( i ),
3645 args=[ onosSetName, addAllValue ] )
3646 threads.append( t )
3647 t.start()
3648 for t in threads:
3649 t.join()
3650 removeAllResponses.append( t.result )
3651 except Exception, e:
3652 main.log.exception(e)
3653
3654 # main.TRUE = successfully changed the set
3655 # main.FALSE = action resulted in no change in set
3656 # main.ERROR - Some error in executing the function
3657 removeAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003658 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003659 if removeAllResponses[ i ] == main.TRUE:
3660 # All is well
3661 pass
3662 elif removeAllResponses[ i ] == main.FALSE:
3663 # not in set, probably fine
3664 pass
3665 elif removeAllResponses[ i ] == main.ERROR:
3666 # Error in execution
3667 removeAllResults = main.FALSE
3668 else:
3669 # unexpected result
3670 removeAllResults = main.FALSE
3671 if removeAllResults != main.TRUE:
3672 main.log.error( "Error executing set removeAll" )
3673
3674 # Check if set is still correct
3675 size = len( onosSet )
3676 getResponses = []
3677 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003678 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003679 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003680 name="setTestGet-" + str( i ),
3681 args=[ onosSetName ] )
3682 threads.append( t )
3683 t.start()
3684 for t in threads:
3685 t.join()
3686 getResponses.append( t.result )
3687 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003688 for i in range( len( main.activeNodes ) ):
3689 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003690 if isinstance( getResponses[ i ], list):
3691 current = set( getResponses[ i ] )
3692 if len( current ) == len( getResponses[ i ] ):
3693 # no repeats
3694 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003695 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003696 " has incorrect view" +
3697 " of set " + onosSetName + ":\n" +
3698 str( getResponses[ i ] ) )
3699 main.log.debug( "Expected: " + str( onosSet ) )
3700 main.log.debug( "Actual: " + str( current ) )
3701 getResults = main.FALSE
3702 else:
3703 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003704 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003705 " has repeat elements in" +
3706 " set " + onosSetName + ":\n" +
3707 str( getResponses[ i ] ) )
3708 getResults = main.FALSE
3709 elif getResponses[ i ] == main.ERROR:
3710 getResults = main.FALSE
3711 sizeResponses = []
3712 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003713 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003714 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003715 name="setTestSize-" + str( i ),
3716 args=[ onosSetName ] )
3717 threads.append( t )
3718 t.start()
3719 for t in threads:
3720 t.join()
3721 sizeResponses.append( t.result )
3722 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003723 for i in range( len( main.activeNodes ) ):
3724 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003725 if size != sizeResponses[ i ]:
3726 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003727 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003728 " expected a size of " + str( size ) +
3729 " for set " + onosSetName +
3730 " but got " + str( sizeResponses[ i ] ) )
3731 removeAllResults = removeAllResults and getResults and sizeResults
3732 utilities.assert_equals( expect=main.TRUE,
3733 actual=removeAllResults,
3734 onpass="Set removeAll correct",
3735 onfail="Set removeAll was incorrect" )
3736
3737 main.step( "Distributed Set addAll()" )
3738 onosSet.update( addAllValue.split() )
3739 addResponses = []
3740 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003741 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003742 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003743 name="setTestAddAll-" + str( i ),
3744 args=[ onosSetName, addAllValue ] )
3745 threads.append( t )
3746 t.start()
3747 for t in threads:
3748 t.join()
3749 addResponses.append( t.result )
3750
3751 # main.TRUE = successfully changed the set
3752 # main.FALSE = action resulted in no change in set
3753 # main.ERROR - Some error in executing the function
3754 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003755 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003756 if addResponses[ i ] == main.TRUE:
3757 # All is well
3758 pass
3759 elif addResponses[ i ] == main.FALSE:
3760 # Already in set, probably fine
3761 pass
3762 elif addResponses[ i ] == main.ERROR:
3763 # Error in execution
3764 addAllResults = main.FALSE
3765 else:
3766 # unexpected result
3767 addAllResults = main.FALSE
3768 if addAllResults != main.TRUE:
3769 main.log.error( "Error executing set addAll" )
3770
3771 # Check if set is still correct
3772 size = len( onosSet )
3773 getResponses = []
3774 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003775 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003776 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003777 name="setTestGet-" + str( i ),
3778 args=[ onosSetName ] )
3779 threads.append( t )
3780 t.start()
3781 for t in threads:
3782 t.join()
3783 getResponses.append( t.result )
3784 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003785 for i in range( len( main.activeNodes ) ):
3786 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003787 if isinstance( getResponses[ i ], list):
3788 current = set( getResponses[ i ] )
3789 if len( current ) == len( getResponses[ i ] ):
3790 # no repeats
3791 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003792 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003793 " has incorrect view" +
3794 " of set " + onosSetName + ":\n" +
3795 str( getResponses[ i ] ) )
3796 main.log.debug( "Expected: " + str( onosSet ) )
3797 main.log.debug( "Actual: " + str( current ) )
3798 getResults = main.FALSE
3799 else:
3800 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003801 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003802 " has repeat elements in" +
3803 " set " + onosSetName + ":\n" +
3804 str( getResponses[ i ] ) )
3805 getResults = main.FALSE
3806 elif getResponses[ i ] == main.ERROR:
3807 getResults = main.FALSE
3808 sizeResponses = []
3809 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003810 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003811 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003812 name="setTestSize-" + str( i ),
3813 args=[ onosSetName ] )
3814 threads.append( t )
3815 t.start()
3816 for t in threads:
3817 t.join()
3818 sizeResponses.append( t.result )
3819 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003820 for i in range( len( main.activeNodes ) ):
3821 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003822 if size != sizeResponses[ i ]:
3823 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003824 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003825 " expected a size of " + str( size ) +
3826 " for set " + onosSetName +
3827 " but got " + str( sizeResponses[ i ] ) )
3828 addAllResults = addAllResults and getResults and sizeResults
3829 utilities.assert_equals( expect=main.TRUE,
3830 actual=addAllResults,
3831 onpass="Set addAll correct",
3832 onfail="Set addAll was incorrect" )
3833
3834 main.step( "Distributed Set clear()" )
3835 onosSet.clear()
3836 clearResponses = []
3837 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003838 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003839 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003840 name="setTestClear-" + str( i ),
3841                             args=[ onosSetName, " "], # value doesn't matter when clearing
3842 kwargs={ "clear": True } )
3843 threads.append( t )
3844 t.start()
3845 for t in threads:
3846 t.join()
3847 clearResponses.append( t.result )
3848
3849 # main.TRUE = successfully changed the set
3850 # main.FALSE = action resulted in no change in set
3851 # main.ERROR - Some error in executing the function
3852 clearResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003853 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003854 if clearResponses[ i ] == main.TRUE:
3855 # All is well
3856 pass
3857 elif clearResponses[ i ] == main.FALSE:
3858 # Nothing set, probably fine
3859 pass
3860 elif clearResponses[ i ] == main.ERROR:
3861 # Error in execution
3862 clearResults = main.FALSE
3863 else:
3864 # unexpected result
3865 clearResults = main.FALSE
3866 if clearResults != main.TRUE:
3867 main.log.error( "Error executing set clear" )
3868
3869 # Check if set is still correct
3870 size = len( onosSet )
3871 getResponses = []
3872 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003873 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003874 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003875 name="setTestGet-" + str( i ),
3876 args=[ onosSetName ] )
3877 threads.append( t )
3878 t.start()
3879 for t in threads:
3880 t.join()
3881 getResponses.append( t.result )
3882 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003883 for i in range( len( main.activeNodes ) ):
3884 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003885 if isinstance( getResponses[ i ], list):
3886 current = set( getResponses[ i ] )
3887 if len( current ) == len( getResponses[ i ] ):
3888 # no repeats
3889 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003890 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003891 " has incorrect view" +
3892 " of set " + onosSetName + ":\n" +
3893 str( getResponses[ i ] ) )
3894 main.log.debug( "Expected: " + str( onosSet ) )
3895 main.log.debug( "Actual: " + str( current ) )
3896 getResults = main.FALSE
3897 else:
3898 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003899 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003900 " has repeat elements in" +
3901 " set " + onosSetName + ":\n" +
3902 str( getResponses[ i ] ) )
3903 getResults = main.FALSE
3904 elif getResponses[ i ] == main.ERROR:
3905 getResults = main.FALSE
3906 sizeResponses = []
3907 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003908 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003909 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003910 name="setTestSize-" + str( i ),
3911 args=[ onosSetName ] )
3912 threads.append( t )
3913 t.start()
3914 for t in threads:
3915 t.join()
3916 sizeResponses.append( t.result )
3917 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003918 for i in range( len( main.activeNodes ) ):
3919 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003920 if size != sizeResponses[ i ]:
3921 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003922 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003923 " expected a size of " + str( size ) +
3924 " for set " + onosSetName +
3925 " but got " + str( sizeResponses[ i ] ) )
3926 clearResults = clearResults and getResults and sizeResults
3927 utilities.assert_equals( expect=main.TRUE,
3928 actual=clearResults,
3929 onpass="Set clear correct",
3930 onfail="Set clear was incorrect" )
3931
3932 main.step( "Distributed Set addAll()" )
3933 onosSet.update( addAllValue.split() )
3934 addResponses = []
3935 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003936 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003937 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003938 name="setTestAddAll-" + str( i ),
3939 args=[ onosSetName, addAllValue ] )
3940 threads.append( t )
3941 t.start()
3942 for t in threads:
3943 t.join()
3944 addResponses.append( t.result )
3945
3946 # main.TRUE = successfully changed the set
3947 # main.FALSE = action resulted in no change in set
3948 # main.ERROR - Some error in executing the function
3949 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003950 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003951 if addResponses[ i ] == main.TRUE:
3952 # All is well
3953 pass
3954 elif addResponses[ i ] == main.FALSE:
3955 # Already in set, probably fine
3956 pass
3957 elif addResponses[ i ] == main.ERROR:
3958 # Error in execution
3959 addAllResults = main.FALSE
3960 else:
3961 # unexpected result
3962 addAllResults = main.FALSE
3963 if addAllResults != main.TRUE:
3964 main.log.error( "Error executing set addAll" )
3965
3966 # Check if set is still correct
3967 size = len( onosSet )
3968 getResponses = []
3969 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003970 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003971 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003972 name="setTestGet-" + str( i ),
3973 args=[ onosSetName ] )
3974 threads.append( t )
3975 t.start()
3976 for t in threads:
3977 t.join()
3978 getResponses.append( t.result )
3979 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003980 for i in range( len( main.activeNodes ) ):
3981 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003982 if isinstance( getResponses[ i ], list):
3983 current = set( getResponses[ i ] )
3984 if len( current ) == len( getResponses[ i ] ):
3985 # no repeats
3986 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003987 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003988 " has incorrect view" +
3989 " of set " + onosSetName + ":\n" +
3990 str( getResponses[ i ] ) )
3991 main.log.debug( "Expected: " + str( onosSet ) )
3992 main.log.debug( "Actual: " + str( current ) )
3993 getResults = main.FALSE
3994 else:
3995 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003996 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003997 " has repeat elements in" +
3998 " set " + onosSetName + ":\n" +
3999 str( getResponses[ i ] ) )
4000 getResults = main.FALSE
4001 elif getResponses[ i ] == main.ERROR:
4002 getResults = main.FALSE
4003 sizeResponses = []
4004 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004005 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004006 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004007 name="setTestSize-" + str( i ),
4008 args=[ onosSetName ] )
4009 threads.append( t )
4010 t.start()
4011 for t in threads:
4012 t.join()
4013 sizeResponses.append( t.result )
4014 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004015 for i in range( len( main.activeNodes ) ):
4016 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004017 if size != sizeResponses[ i ]:
4018 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07004019 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004020 " expected a size of " + str( size ) +
4021 " for set " + onosSetName +
4022 " but got " + str( sizeResponses[ i ] ) )
4023 addAllResults = addAllResults and getResults and sizeResults
4024 utilities.assert_equals( expect=main.TRUE,
4025 actual=addAllResults,
4026 onpass="Set addAll correct",
4027 onfail="Set addAll was incorrect" )
4028
4029 main.step( "Distributed Set retain()" )
4030 onosSet.intersection_update( retainValue.split() )
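        # retain is assumed to behave like Java's Set.retainAll: every element
        # not listed in retainValue is dropped, which is mirrored locally by
        # intersection_update above.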
4031 retainResponses = []
4032 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004033 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004034 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004035 name="setTestRetain-" + str( i ),
4036 args=[ onosSetName, retainValue ],
4037 kwargs={ "retain": True } )
4038 threads.append( t )
4039 t.start()
4040 for t in threads:
4041 t.join()
4042 retainResponses.append( t.result )
4043
4044 # main.TRUE = successfully changed the set
4045 # main.FALSE = action resulted in no change in set
4046 # main.ERROR - Some error in executing the function
4047 retainResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004048 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004049 if retainResponses[ i ] == main.TRUE:
4050 # All is well
4051 pass
4052 elif retainResponses[ i ] == main.FALSE:
4053 # Retain caused no change; the set already held only retained elements, probably fine
4054 pass
4055 elif retainResponses[ i ] == main.ERROR:
4056 # Error in execution
4057 retainResults = main.FALSE
4058 else:
4059 # unexpected result
4060 retainResults = main.FALSE
4061 if retainResults != main.TRUE:
4062 main.log.error( "Error executing set retain" )
4063
4064 # Check if set is still correct
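# Same verification as after addAll: a parallel per-node get compared
# against the reference set, followed by a size cross-check.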
4065 size = len( onosSet )
4066 getResponses = []
4067 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004068 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004069 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004070 name="setTestGet-" + str( i ),
4071 args=[ onosSetName ] )
4072 threads.append( t )
4073 t.start()
4074 for t in threads:
4075 t.join()
4076 getResponses.append( t.result )
4077 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004078 for i in range( len( main.activeNodes ) ):
4079 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004080 if isinstance( getResponses[ i ], list ):
4081 current = set( getResponses[ i ] )
4082 if len( current ) == len( getResponses[ i ] ):
4083 # no repeats
4084 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07004085 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004086 " has incorrect view" +
4087 " of set " + onosSetName + ":\n" +
4088 str( getResponses[ i ] ) )
4089 main.log.debug( "Expected: " + str( onosSet ) )
4090 main.log.debug( "Actual: " + str( current ) )
4091 getResults = main.FALSE
4092 else:
4093 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07004094 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004095 " has repeat elements in" +
4096 " set " + onosSetName + ":\n" +
4097 str( getResponses[ i ] ) )
4098 getResults = main.FALSE
4099 elif getResponses[ i ] == main.ERROR:
4100 getResults = main.FALSE
4101 sizeResponses = []
4102 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004103 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004104 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004105 name="setTestSize-" + str( i ),
4106 args=[ onosSetName ] )
4107 threads.append( t )
4108 t.start()
4109 for t in threads:
4110 t.join()
4111 sizeResponses.append( t.result )
4112 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004113 for i in range( len( main.activeNodes ) ):
4114 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004115 if size != sizeResponses[ i ]:
4116 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07004117 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004118 str( size ) + " for set " + onosSetName +
4119 " but got " + str( sizeResponses[ i ] ) )
4120 retainResults = retainResults and getResults and sizeResults
4121 utilities.assert_equals( expect=main.TRUE,
4122 actual=retainResults,
4123 onpass="Set retain correct",
4124 onfail="Set retain was incorrect" )
4125
Jon Hall2a5002c2015-08-21 16:49:11 -07004126 # Transactional maps
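# The first active node writes numKeys entries, all with the value
# tMapValue, through the transactional map test CLI; the get step below
# expects the keys to be named "Key1" .. "Key<numKeys>". The loop that
# follows assumes the driver returns one entry per key with a 'value'
# field. Local model of what is being verified ( illustration only ):
#     expected = { "Key%d" % k: tMapValue for k in range( 1, numKeys + 1 ) }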
4127 main.step( "Partitioned Transactional maps put" )
4128 tMapValue = "Testing"
4129 numKeys = 100
4130 putResult = True
Jon Halla440e872016-03-31 15:15:50 -07004131 node = main.activeNodes[0]
4132 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4133 if putResponses and len( putResponses ) == numKeys:
Jon Hall2a5002c2015-08-21 16:49:11 -07004134 for i in putResponses:
4135 if putResponses[ i ][ 'value' ] != tMapValue:
4136 putResult = False
4137 else:
4138 putResult = False
4139 if not putResult:
4140 main.log.debug( "Put response values: " + str( putResponses ) )
4141 utilities.assert_equals( expect=True,
4142 actual=putResult,
4143 onpass="Partitioned Transactional Map put successful",
4144 onfail="Partitioned Transactional Map put values are incorrect" )
4145
4146 main.step( "Partitioned Transactional maps get" )
Jon Hall9bfadd22016-05-11 14:48:07 -07004147 # FIXME: is this sleep needed?
4148 time.sleep( 5 )
4149
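# Every active node reads each key back in parallel; all returned values
# must equal tMapValue or the step fails.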
Jon Hall2a5002c2015-08-21 16:49:11 -07004150 getCheck = True
4151 for n in range( 1, numKeys + 1 ):
4152 getResponses = []
4153 threads = []
4154 valueCheck = True
Jon Halla440e872016-03-31 15:15:50 -07004155 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004156 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4157 name="TMap-get-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07004158 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004159 threads.append( t )
4160 t.start()
4161 for t in threads:
4162 t.join()
4163 getResponses.append( t.result )
4164 for response in getResponses:
4165 if response != tMapValue:
4166 valueCheck = False
4167 if not valueCheck:
4168 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4169 main.log.warn( getResponses )
4170 getCheck = getCheck and valueCheck
4171 utilities.assert_equals( expect=True,
4172 actual=getCheck,
4173 onpass="Partitioned Transactional Map get values were correct",
4174 onfail="Partitioned Transactional Map get values were incorrect" )