Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if the HA test setup is
3 working correctly. There are no failures so this test should
4 have a 100% pass rate
5
6List of test cases:
7CASE1: Compile ONOS and push it to the test machines
8CASE2: Assign devices to controllers
9CASE21: Assign mastership to controllers
10CASE3: Assign intents
11CASE4: Ping across added host intents
12CASE5: Reading state of ONOS
13CASE6: The Failure case. Since this is the Sanity test, we do nothing.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: Start election app on all ONOS nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAsanity:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hallf3d16e72015-12-16 17:45:08 -080052 import time
Jon Halla440e872016-03-31 15:15:50 -070053 import json
Jon Hall5cf14d52015-07-16 12:15:19 -070054 main.log.info( "ONOS HA Sanity test - initialization" )
55 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070056 main.caseExplanation = "Set up the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070057 "installing ONOS, starting Mininet and ONOS " +\
58 "cli sessions."
Jon Hall5cf14d52015-07-16 12:15:19 -070059
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
Jon Halle1a3b752015-07-22 13:02:46 -070067 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070068 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070069 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -070071 # TODO: refactor how to get onos port, maybe put into component tag?
Jon Halle1a3b752015-07-22 13:02:46 -070072 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070080 # These are for csv plotting in jenkins
81 global labels
82 global data
83 labels = []
84 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -070085
86 # FIXME: just get controller port from params?
87 # TODO: do we really need all these?
88 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
89 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
90 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
91 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
92 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
93 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
94 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
95
Jon Halle1a3b752015-07-22 13:02:46 -070096 try:
Jon Hall41d39f12016-04-11 22:54:35 -070097 from tests.HAsanity.dependencies.HA import HA
98 main.HA = HA()
Jon Halle1a3b752015-07-22 13:02:46 -070099 except Exception as e:
100 main.log.exception( e )
101 main.cleanup()
102 main.exit()
103
104 main.CLIs = []
105 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700106 ipList = []
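        # Collect the per-node driver handles that TestON created from the .topo
        # file ( ONOScli1/ONOS1, ONOScli2/ONOS2, ... ), stopping at the first one
        # that is not defined so the lists only hold nodes that actually exist.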
Jon Halle1a3b752015-07-22 13:02:46 -0700107 for i in range( 1, main.numCtrls + 1 ):
108 try:
109 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
110 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
111 ipList.append( main.nodes[ -1 ].ip_address )
112 except AttributeError:
113 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700114
115 main.step( "Create cell file" )
116 cellAppString = main.params[ 'ENV' ][ 'appString' ]
117 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
118 main.Mininet1.ip_address,
119 cellAppString, ipList )
120 main.step( "Applying cell variable to environment" )
121 cellResult = main.ONOSbench.setCell( cellName )
122 verifyResult = main.ONOSbench.verifyCell()
123
124 # FIXME:this is short term fix
125 main.log.info( "Removing raft logs" )
126 main.ONOSbench.onosRemoveRaftLogs()
127
128 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700129 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700130 main.ONOSbench.onosUninstall( node.ip_address )
131
132 # Make sure ONOS is DEAD
133 main.log.info( "Killing any ONOS processes" )
134 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700135 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700136 killed = main.ONOSbench.onosKill( node.ip_address )
137 killResults = killResults and killed
138
139 cleanInstallResult = main.TRUE
140 gitPullResult = main.TRUE
141
142 main.step( "Starting Mininet" )
143 # scp topo file to mininet
144 # TODO: move to params?
145 topoName = "obelisk.py"
146 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700147 main.ONOSbench.scp( main.Mininet1,
148 filePath + topoName,
149 main.Mininet1.home,
150 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700151 mnResult = main.Mininet1.startNet( )
152 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
153 onpass="Mininet Started",
154 onfail="Error starting Mininet" )
155
156 main.step( "Git checkout and pull " + gitBranch )
157 if PULLCODE:
158 main.ONOSbench.gitCheckout( gitBranch )
159 gitPullResult = main.ONOSbench.gitPull()
160 # values of 1 or 3 are good
161 utilities.assert_lesser( expect=0, actual=gitPullResult,
162 onpass="Git pull successful",
163 onfail="Git pull failed" )
164 main.ONOSbench.getVersion( report=True )
165
166 main.step( "Using mvn clean install" )
167 cleanInstallResult = main.TRUE
168 if PULLCODE and gitPullResult == main.TRUE:
169 cleanInstallResult = main.ONOSbench.cleanInstall()
170 else:
171 main.log.warn( "Did not pull new code so skipping mvn " +
172 "clean install" )
173 utilities.assert_equals( expect=main.TRUE,
174 actual=cleanInstallResult,
175 onpass="MCI successful",
176 onfail="MCI failed" )
177 # GRAPHS
178 # NOTE: important params here:
179 # job = name of Jenkins job
180 # Plot Name = Plot-HA, only can be used if multiple plots
181 # index = The number of the graph under plot name
182 job = "HAsanity"
183 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700184 index = "2"
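        # Build a Confluence "html" structured macro that embeds this job's Jenkins
        # plot as an iframe; main.log.wiki() writes it to the wiki-formatted log so
        # the trend graph shows up on the results page.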
Jon Hall5cf14d52015-07-16 12:15:19 -0700185 graphs = '<ac:structured-macro ac:name="html">\n'
186 graphs += '<ac:plain-text-body><![CDATA[\n'
187 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800188 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700189 '&width=500&height=300"' +\
190 'noborder="0" width="500" height="300" scrolling="yes" ' +\
191 'seamless="seamless"></iframe>\n'
192 graphs += ']]></ac:plain-text-body>\n'
193 graphs += '</ac:structured-macro>\n'
194 main.log.wiki( graphs )
195
196 main.step( "Creating ONOS package" )
197 packageResult = main.ONOSbench.onosPackage()
198 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
199 onpass="ONOS package successful",
200 onfail="ONOS package failed" )
201
202 main.step( "Installing ONOS package" )
203 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700204 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700205 tmpResult = main.ONOSbench.onosInstall( options="-f",
206 node=node.ip_address )
207 onosInstallResult = onosInstallResult and tmpResult
208 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
209 onpass="ONOS install successful",
210 onfail="ONOS install failed" )
211
212 main.step( "Checking if ONOS is up yet" )
213 for i in range( 2 ):
214 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700215 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700216 started = main.ONOSbench.isup( node.ip_address )
217 if not started:
Jon Hallc6793552016-01-19 14:18:37 -0800218 main.log.error( node.name + " hasn't started" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700219 onosIsupResult = onosIsupResult and started
220 if onosIsupResult == main.TRUE:
221 break
222 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
223 onpass="ONOS startup successful",
224 onfail="ONOS startup failed" )
225
226 main.log.step( "Starting ONOS CLI sessions" )
227 cliResults = main.TRUE
228 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700229 for i in range( main.numCtrls ):
230 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700231 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700232 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700233 threads.append( t )
234 t.start()
235
236 for t in threads:
237 t.join()
238 cliResults = cliResults and t.result
239 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
240 onpass="ONOS cli startup successful",
241 onfail="ONOS cli startup failed" )
242
Jon Halla440e872016-03-31 15:15:50 -0700243 # Create a list of active nodes for use when some nodes are stopped
244 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
245
Jon Hall5cf14d52015-07-16 12:15:19 -0700246 if main.params[ 'tcpdump' ].lower() == "true":
247 main.step( "Start Packet Capture MN" )
248 main.Mininet2.startTcpdump(
249 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
250 + "-MN.pcap",
251 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
252 port=main.params[ 'MNtcpdump' ][ 'port' ] )
253
Jon Halla440e872016-03-31 15:15:50 -0700254 main.step( "Checking ONOS nodes" )
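        # Retry the HA helper's nodesCheck ( up to 5 attempts ) until it stops
        # returning False, i.e. until every active node reports the cluster
        # members as up and READY.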
Jon Hall41d39f12016-04-11 22:54:35 -0700255 nodeResults = utilities.retry( main.HA.nodesCheck,
256 False,
257 args=[main.activeNodes],
258 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700259
Jon Hall41d39f12016-04-11 22:54:35 -0700260 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700261 onpass="Nodes check successful",
262 onfail="Nodes check NOT successful" )
263
264 if not nodeResults:
265 for cli in main.CLIs:
266 main.log.debug( "{} components not ACTIVE: \n{}".format(
267 cli.name,
268 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
269
Jon Hall5cf14d52015-07-16 12:15:19 -0700270 if cliResults == main.FALSE:
271 main.log.error( "Failed to start ONOS, stopping test" )
272 main.cleanup()
273 main.exit()
274
Jon Hall172b7ba2016-04-07 18:12:20 -0700275 main.step( "Activate apps defined in the params file" )
276 # get data from the params
277 apps = main.params.get( 'apps' )
278 if apps:
279 apps = apps.split(',')
280 main.log.warn( apps )
281 activateResult = True
282 for app in apps:
283 main.CLIs[ 0 ].app( app, "Activate" )
284 # TODO: check this worked
285 time.sleep( 10 ) # wait for apps to activate
286 for app in apps:
287 state = main.CLIs[ 0 ].appStatus( app )
288 if state == "ACTIVE":
289 activateResult = activateResult and True
290 else:
291 main.log.error( "{} is in {} state".format( app, state ) )
292 activateResult = False
293 utilities.assert_equals( expect=True,
294 actual=activateResult,
295 onpass="Successfully activated apps",
296 onfail="Failed to activate apps" )
297 else:
298 main.log.warn( "No apps were specified to be loaded after startup" )
299
300 main.step( "Set ONOS configurations" )
301 config = main.params.get( 'ONOS_Configuration' )
302 if config:
303 main.log.debug( config )
304 checkResult = main.TRUE
305 for component in config:
306 for setting in config[component]:
307 value = config[component][setting]
308 check = main.CLIs[ 0 ].setCfg( component, setting, value )
309 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
310 checkResult = check and checkResult
311 utilities.assert_equals( expect=main.TRUE,
312 actual=checkResult,
313 onpass="Successfully set config",
314 onfail="Failed to set config" )
315 else:
316 main.log.warn( "No configurations were specified to be changed after startup" )
317
Jon Hall9d2dcad2016-04-08 10:15:20 -0700318 main.step( "App Ids check" )
319 appCheck = main.TRUE
320 threads = []
321 for i in main.activeNodes:
322 t = main.Thread( target=main.CLIs[i].appToIDCheck,
323 name="appToIDCheck-" + str( i ),
324 args=[] )
325 threads.append( t )
326 t.start()
327
328 for t in threads:
329 t.join()
330 appCheck = appCheck and t.result
331 if appCheck != main.TRUE:
332 node = main.activeNodes[0]
333 main.log.warn( main.CLIs[node].apps() )
334 main.log.warn( main.CLIs[node].appIDs() )
335 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
336 onpass="App Ids seem to be correct",
337 onfail="Something is wrong with app Ids" )
338
Jon Hall5cf14d52015-07-16 12:15:19 -0700339 def CASE2( self, main ):
340 """
341 Assign devices to controllers
342 """
343 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700344 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700345 assert main, "main not defined"
346 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700347 assert main.CLIs, "main.CLIs not defined"
348 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700349 assert ONOS1Port, "ONOS1Port not defined"
350 assert ONOS2Port, "ONOS2Port not defined"
351 assert ONOS3Port, "ONOS3Port not defined"
352 assert ONOS4Port, "ONOS4Port not defined"
353 assert ONOS5Port, "ONOS5Port not defined"
354 assert ONOS6Port, "ONOS6Port not defined"
355 assert ONOS7Port, "ONOS7Port not defined"
356
357 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700358 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700359 "and check that an ONOS node becomes the " +\
360 "master of the device."
361 main.step( "Assign switches to controllers" )
362
363 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700364 for i in range( main.numCtrls ):
365 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700366 swList = []
367 for i in range( 1, 29 ):
368 swList.append( "s" + str( i ) )
369 main.Mininet1.assignSwController( sw=swList, ip=ipList )
370
371 mastershipCheck = main.TRUE
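        # Verify the assignment: the ovs-vsctl controller list for every switch
        # should contain a tcp:<ip> entry for each ONOS node.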
372 for i in range( 1, 29 ):
373 response = main.Mininet1.getSwController( "s" + str( i ) )
374 try:
375 main.log.info( str( response ) )
376 except Exception:
377 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700378 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700379 if re.search( "tcp:" + node.ip_address, response ):
380 mastershipCheck = mastershipCheck and main.TRUE
381 else:
382 main.log.error( "Error, node " + node.ip_address + " is " +
383 "not in the list of controllers s" +
384 str( i ) + " is connecting to." )
385 mastershipCheck = main.FALSE
386 utilities.assert_equals(
387 expect=main.TRUE,
388 actual=mastershipCheck,
389 onpass="Switch mastership assigned correctly",
390 onfail="Switches not assigned correctly to controllers" )
391
392 def CASE21( self, main ):
393 """
394 Assign mastership to controllers
395 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700396 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700397 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700398 assert main, "main not defined"
399 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700400 assert main.CLIs, "main.CLIs not defined"
401 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700402 assert ONOS1Port, "ONOS1Port not defined"
403 assert ONOS2Port, "ONOS2Port not defined"
404 assert ONOS3Port, "ONOS3Port not defined"
405 assert ONOS4Port, "ONOS4Port not defined"
406 assert ONOS5Port, "ONOS5Port not defined"
407 assert ONOS6Port, "ONOS6Port not defined"
408 assert ONOS7Port, "ONOS7Port not defined"
409
410 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700411 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700412 "device. Then manually assign" +\
413 " mastership to specific ONOS nodes using" +\
414 " 'device-role'"
415 main.step( "Assign mastership of switches to specific controllers" )
416 # Manually assign mastership to the controller we want
417 roleCall = main.TRUE
418
419 ipList = [ ]
420 deviceList = []
Jon Halla440e872016-03-31 15:15:50 -0700421 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700422 try:
423 # Assign mastership to specific controllers. This assignment was
424 # determined for a 7 node cluster, but will work with any sized
425 # cluster
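            # Mapping used below ( modulo the actual cluster size ):
            #   s1, s28   -> ONOS1      s2, s3   -> ONOS2      s5, s6 -> ONOS3
            #   s4        -> ONOS4      s8 - s17 -> ONOS5      s7     -> ONOS6
            #   s18 - s27 -> ONOS7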
426 for i in range( 1, 29 ): # switches 1 through 28
427 # set up correct variables:
428 if i == 1:
429 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700430 ip = main.nodes[ c ].ip_address # ONOS1
Jon Halla440e872016-03-31 15:15:50 -0700431 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700432 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700433 c = 1 % main.numCtrls
434 ip = main.nodes[ c ].ip_address # ONOS2
Jon Halla440e872016-03-31 15:15:50 -0700435 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700436 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700437 c = 1 % main.numCtrls
438 ip = main.nodes[ c ].ip_address # ONOS2
Jon Halla440e872016-03-31 15:15:50 -0700439 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700440 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700441 c = 3 % main.numCtrls
442 ip = main.nodes[ c ].ip_address # ONOS4
Jon Halla440e872016-03-31 15:15:50 -0700443 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700444 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700445 c = 2 % main.numCtrls
446 ip = main.nodes[ c ].ip_address # ONOS3
Jon Halla440e872016-03-31 15:15:50 -0700447 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700448 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700449 c = 2 % main.numCtrls
450 ip = main.nodes[ c ].ip_address # ONOS3
Jon Halla440e872016-03-31 15:15:50 -0700451 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700452 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700453 c = 5 % main.numCtrls
454 ip = main.nodes[ c ].ip_address # ONOS6
Jon Halla440e872016-03-31 15:15:50 -0700455 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700456 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700457 c = 4 % main.numCtrls
458 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700459 dpid = '3' + str( i ).zfill( 3 )
Jon Halla440e872016-03-31 15:15:50 -0700460 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700461 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700462 c = 6 % main.numCtrls
463 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700464 dpid = '6' + str( i ).zfill( 3 )
Jon Halla440e872016-03-31 15:15:50 -0700465 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700466 elif i == 28:
467 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700468 ip = main.nodes[ c ].ip_address # ONOS1
Jon Halla440e872016-03-31 15:15:50 -0700469 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700470 else:
471 main.log.error( "You didn't write an else statement for " +
472 "switch s" + str( i ) )
473 roleCall = main.FALSE
474 # Assign switch
475 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
476 # TODO: make this controller dynamic
Jon Halla440e872016-03-31 15:15:50 -0700477 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700478 ipList.append( ip )
479 deviceList.append( deviceId )
480 except ( AttributeError, AssertionError ):
481 main.log.exception( "Something is wrong with ONOS device view" )
Jon Halla440e872016-03-31 15:15:50 -0700482 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700483 utilities.assert_equals(
484 expect=main.TRUE,
485 actual=roleCall,
486 onpass="Re-assigned switch mastership to designated controller",
487 onfail="Something wrong with deviceRole calls" )
488
489 main.step( "Check mastership was correctly assigned" )
490 roleCheck = main.TRUE
491 # NOTE: This is due to the fact that device mastership change is not
492 # atomic and is actually a multi step process
493 time.sleep( 5 )
494 for i in range( len( ipList ) ):
495 ip = ipList[i]
496 deviceId = deviceList[i]
497 # Check assignment
Jon Halla440e872016-03-31 15:15:50 -0700498 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 if ip in master:
500 roleCheck = roleCheck and main.TRUE
501 else:
502 roleCheck = roleCheck and main.FALSE
503 main.log.error( "Error, controller " + ip + " is not" +
504 " master " + "of device " +
505 str( deviceId ) + ". Master is " +
506 repr( master ) + "." )
507 utilities.assert_equals(
508 expect=main.TRUE,
509 actual=roleCheck,
510 onpass="Switches were successfully reassigned to designated " +
511 "controller",
512 onfail="Switches were not successfully reassigned" )
513
514 def CASE3( self, main ):
515 """
516 Assign intents
517 """
518 import time
519 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700520 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700521 assert main, "main not defined"
522 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700523 assert main.CLIs, "main.CLIs not defined"
524 assert main.nodes, "main.nodes not defined"
Jon Halla440e872016-03-31 15:15:50 -0700525 try:
526 labels
527 except NameError:
528 main.log.error( "labels not defined, setting to []" )
529 labels = []
530 try:
531 data
532 except NameError:
533 main.log.error( "data not defined, setting to []" )
534 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700535 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700536 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700537 "assign predetermined host-to-host intents." +\
538 " After installation, check that the intent" +\
539 " is distributed to all nodes and the state" +\
540 " is INSTALLED"
541
542 # install onos-app-fwd
543 main.step( "Install reactive forwarding app" )
Jon Halla440e872016-03-31 15:15:50 -0700544 onosCli = main.CLIs[ main.activeNodes[0] ]
545 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700546 utilities.assert_equals( expect=main.TRUE, actual=installResults,
547 onpass="Install fwd successful",
548 onfail="Install fwd failed" )
549
550 main.step( "Check app ids" )
551 appCheck = main.TRUE
552 threads = []
Jon Halla440e872016-03-31 15:15:50 -0700553 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700554 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700555 name="appToIDCheck-" + str( i ),
556 args=[] )
557 threads.append( t )
558 t.start()
559
560 for t in threads:
561 t.join()
562 appCheck = appCheck and t.result
563 if appCheck != main.TRUE:
Jon Halla440e872016-03-31 15:15:50 -0700564 main.log.warn( onosCli.apps() )
565 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700566 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
567 onpass="App Ids seem to be correct",
568 onfail="Something is wrong with app Ids" )
569
570 main.step( "Discovering Hosts( Via pingall for now )" )
571 # FIXME: Once we have a host discovery mechanism, use that instead
572 # REACTIVE FWD test
573 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700574 passMsg = "Reactive Pingall test passed"
575 time1 = time.time()
576 pingResult = main.Mininet1.pingall()
577 time2 = time.time()
578 if not pingResult:
579 main.log.warn( "First pingall failed. Trying again..." )
Jon Hall5cf14d52015-07-16 12:15:19 -0700580 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700581 passMsg += " on the second try"
582 utilities.assert_equals(
583 expect=main.TRUE,
584 actual=pingResult,
585 onpass= passMsg,
586 onfail="Reactive Pingall failed, " +
587 "one or more ping pairs failed" )
588 main.log.info( "Time for pingall: %2f seconds" %
589 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700590 # timeout for fwd flows
591 time.sleep( 11 )
592 # uninstall onos-app-fwd
593 main.step( "Uninstall reactive forwarding app" )
Jon Halla440e872016-03-31 15:15:50 -0700594 node = main.activeNodes[0]
595 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700596 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
597 onpass="Uninstall fwd successful",
598 onfail="Uninstall fwd failed" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700599
600 main.step( "Check app ids" )
601 threads = []
602 appCheck2 = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -0700603 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700604 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700605 name="appToIDCheck-" + str( i ),
606 args=[] )
607 threads.append( t )
608 t.start()
609
610 for t in threads:
611 t.join()
612 appCheck2 = appCheck2 and t.result
613 if appCheck2 != main.TRUE:
Jon Halla440e872016-03-31 15:15:50 -0700614 node = main.activeNodes[0]
615 main.log.warn( main.CLIs[node].apps() )
616 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700617 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
618 onpass="App Ids seem to be correct",
619 onfail="Something is wrong with app Ids" )
620
621 main.step( "Add host intents via cli" )
622 intentIds = []
Jon Hall6e709752016-02-01 13:38:46 -0800623 # TODO: move the host numbers to params
624 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700625 intentAddResult = True
626 hostResult = main.TRUE
627 for i in range( 8, 18 ):
628 main.log.info( "Adding host intent between h" + str( i ) +
629 " and h" + str( i + 10 ) )
630 host1 = "00:00:00:00:00:" + \
631 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
632 host2 = "00:00:00:00:00:" + \
633 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
634 # NOTE: getHost can return None
Jon Halla440e872016-03-31 15:15:50 -0700635 host1Dict = onosCli.getHost( host1 )
636 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700637 host1Id = None
638 host2Id = None
639 if host1Dict and host2Dict:
640 host1Id = host1Dict.get( 'id', None )
641 host2Id = host2Dict.get( 'id', None )
642 if host1Id and host2Id:
Jon Halla440e872016-03-31 15:15:50 -0700643 nodeNum = ( i % len( main.activeNodes ) )
644 node = main.activeNodes[nodeNum]
645 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700646 if tmpId:
647 main.log.info( "Added intent with id: " + tmpId )
648 intentIds.append( tmpId )
649 else:
650 main.log.error( "addHostIntent returned: " +
651 repr( tmpId ) )
652 else:
653 main.log.error( "Error, getHost() failed for h" + str( i ) +
654 " and/or h" + str( i + 10 ) )
Jon Halla440e872016-03-31 15:15:50 -0700655 node = main.activeNodes[0]
656 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700657 main.log.warn( "Hosts output: " )
658 try:
659 main.log.warn( json.dumps( json.loads( hosts ),
660 sort_keys=True,
661 indent=4,
662 separators=( ',', ': ' ) ) )
663 except ( ValueError, TypeError ):
664 main.log.warn( repr( hosts ) )
665 hostResult = main.FALSE
666 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
667 onpass="Found a host id for each host",
668 onfail="Error looking up host ids" )
669
670 intentStart = time.time()
Jon Halla440e872016-03-31 15:15:50 -0700671 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700672 main.log.info( "Submitted intents: " + str( intentIds ) )
673 main.log.info( "Intents in ONOS: " + str( onosIds ) )
674 for intent in intentIds:
675 if intent in onosIds:
676 pass # intent submitted is in onos
677 else:
678 intentAddResult = False
679 if intentAddResult:
680 intentStop = time.time()
681 else:
682 intentStop = None
683 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700684 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700685 intentStates = []
686 installedCheck = True
687 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
688 count = 0
689 try:
690 for intent in json.loads( intents ):
691 state = intent.get( 'state', None )
692 if "INSTALLED" not in state:
693 installedCheck = False
694 intentId = intent.get( 'id', None )
695 intentStates.append( ( intentId, state ) )
696 except ( ValueError, TypeError ):
697 main.log.exception( "Error parsing intents" )
698 # add submitted intents not in the store
699 tmplist = [ i for i, s in intentStates ]
700 missingIntents = False
701 for i in intentIds:
702 if i not in tmplist:
703 intentStates.append( ( i, " - " ) )
704 missingIntents = True
705 intentStates.sort()
706 for i, s in intentStates:
707 count += 1
708 main.log.info( "%-6s%-15s%-15s" %
709 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700710 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700711 try:
712 missing = False
713 if leaders:
714 parsedLeaders = json.loads( leaders )
715 main.log.warn( json.dumps( parsedLeaders,
716 sort_keys=True,
717 indent=4,
718 separators=( ',', ': ' ) ) )
719 # check for all intent partitions
720 topics = []
721 for i in range( 14 ):
722 topics.append( "intent-partition-" + str( i ) )
723 main.log.debug( topics )
724 ONOStopics = [ j['topic'] for j in parsedLeaders ]
725 for topic in topics:
726 if topic not in ONOStopics:
727 main.log.error( "Error: " + topic +
728 " not in leaders" )
729 missing = True
730 else:
731 main.log.error( "leaders() returned None" )
732 except ( ValueError, TypeError ):
733 main.log.exception( "Error parsing leaders" )
734 main.log.error( repr( leaders ) )
735 # Check all nodes
736 if missing:
Jon Halla440e872016-03-31 15:15:50 -0700737 for i in main.activeNodes:
738 response = main.CLIs[i].leaders( jsonFormat=False)
739 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700740 str( response ) )
741
Jon Halla440e872016-03-31 15:15:50 -0700742 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700743 try:
744 if partitions :
745 parsedPartitions = json.loads( partitions )
746 main.log.warn( json.dumps( parsedPartitions,
747 sort_keys=True,
748 indent=4,
749 separators=( ',', ': ' ) ) )
750 # TODO check for a leader in all partitions
751 # TODO check for consistency among nodes
752 else:
753 main.log.error( "partitions() returned None" )
754 except ( ValueError, TypeError ):
755 main.log.exception( "Error parsing partitions" )
756 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -0700757 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700758 try:
759 if pendingMap :
760 parsedPending = json.loads( pendingMap )
761 main.log.warn( json.dumps( parsedPending,
762 sort_keys=True,
763 indent=4,
764 separators=( ',', ': ' ) ) )
765 # TODO check something here?
766 else:
767 main.log.error( "pendingMap() returned None" )
768 except ( ValueError, TypeError ):
769 main.log.exception( "Error parsing pending map" )
770 main.log.error( repr( pendingMap ) )
771
772 intentAddResult = bool( intentAddResult and not missingIntents and
773 installedCheck )
774 if not intentAddResult:
775 main.log.error( "Error in pushing host intents to ONOS" )
776
777 main.step( "Intent Anti-Entropy dispersion" )
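        # Poll every active node ( up to 100 times, one second apart ) until each
        # one reports the full set of submitted intent IDs in the INSTALLED state,
        # then record how long the eventually-consistent intent store took to
        # converge.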
Jon Halla440e872016-03-31 15:15:50 -0700778 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700779 correct = True
780 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700781 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700782 onosIds = []
Jon Halla440e872016-03-31 15:15:50 -0700783 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700784 onosIds.append( ids )
Jon Halla440e872016-03-31 15:15:50 -0700785 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700786 str( sorted( onosIds ) ) )
787 if sorted( ids ) != sorted( intentIds ):
788 main.log.warn( "Set of intent IDs doesn't match" )
789 correct = False
790 break
791 else:
Jon Halla440e872016-03-31 15:15:50 -0700792 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700793 for intent in intents:
794 if intent[ 'state' ] != "INSTALLED":
795 main.log.warn( "Intent " + intent[ 'id' ] +
796 " is " + intent[ 'state' ] )
797 correct = False
798 break
799 if correct:
800 break
801 else:
802 time.sleep(1)
803 if not intentStop:
804 intentStop = time.time()
805 global gossipTime
806 gossipTime = intentStop - intentStart
807 main.log.info( "It took about " + str( gossipTime ) +
808 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700809 gossipPeriod = int( main.params['timers']['gossip'] )
Jon Halla440e872016-03-31 15:15:50 -0700810 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700811 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700812 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700813 onpass="ECM anti-entropy for intents worked within " +
814 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700815 onfail="Intent ECM anti-entropy took too long. " +
816 "Expected time:{}, Actual time:{}".format( maxGossipTime,
817 gossipTime ) )
818 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700819 intentAddResult = True
820
821 if not intentAddResult or "key" in pendingMap:
822 import time
823 installedCheck = True
824 main.log.info( "Sleeping 60 seconds to see if intents are found" )
825 time.sleep( 60 )
Jon Halla440e872016-03-31 15:15:50 -0700826 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700827 main.log.info( "Submitted intents: " + str( intentIds ) )
828 main.log.info( "Intents in ONOS: " + str( onosIds ) )
829 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700830 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700831 intentStates = []
832 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
833 count = 0
834 try:
835 for intent in json.loads( intents ):
836 # Iter through intents of a node
837 state = intent.get( 'state', None )
838 if "INSTALLED" not in state:
839 installedCheck = False
840 intentId = intent.get( 'id', None )
841 intentStates.append( ( intentId, state ) )
842 except ( ValueError, TypeError ):
843 main.log.exception( "Error parsing intents" )
844 # add submitted intents not in the store
845 tmplist = [ i for i, s in intentStates ]
846 for i in intentIds:
847 if i not in tmplist:
848 intentStates.append( ( i, " - " ) )
849 intentStates.sort()
850 for i, s in intentStates:
851 count += 1
852 main.log.info( "%-6s%-15s%-15s" %
853 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700854 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700855 try:
856 missing = False
857 if leaders:
858 parsedLeaders = json.loads( leaders )
859 main.log.warn( json.dumps( parsedLeaders,
860 sort_keys=True,
861 indent=4,
862 separators=( ',', ': ' ) ) )
863 # check for all intent partitions
864 # check for election
865 topics = []
866 for i in range( 14 ):
867 topics.append( "intent-partition-" + str( i ) )
868 # FIXME: this should only be after we start the app
869 topics.append( "org.onosproject.election" )
870 main.log.debug( topics )
871 ONOStopics = [ j['topic'] for j in parsedLeaders ]
872 for topic in topics:
873 if topic not in ONOStopics:
874 main.log.error( "Error: " + topic +
875 " not in leaders" )
876 missing = True
877 else:
878 main.log.error( "leaders() returned None" )
879 except ( ValueError, TypeError ):
880 main.log.exception( "Error parsing leaders" )
881 main.log.error( repr( leaders ) )
882 # Check all nodes
883 if missing:
Jon Halla440e872016-03-31 15:15:50 -0700884 for i in main.activeNodes:
885 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700886 response = node.leaders( jsonFormat=False)
887 main.log.warn( str( node.name ) + " leaders output: \n" +
888 str( response ) )
889
Jon Halla440e872016-03-31 15:15:50 -0700890 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700891 try:
892 if partitions :
893 parsedPartitions = json.loads( partitions )
894 main.log.warn( json.dumps( parsedPartitions,
895 sort_keys=True,
896 indent=4,
897 separators=( ',', ': ' ) ) )
898 # TODO check for a leader in all partitions
899 # TODO check for consistency among nodes
900 else:
901 main.log.error( "partitions() returned None" )
902 except ( ValueError, TypeError ):
903 main.log.exception( "Error parsing partitions" )
904 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -0700905 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700906 try:
907 if pendingMap :
908 parsedPending = json.loads( pendingMap )
909 main.log.warn( json.dumps( parsedPending,
910 sort_keys=True,
911 indent=4,
912 separators=( ',', ': ' ) ) )
913 # TODO check something here?
914 else:
915 main.log.error( "pendingMap() returned None" )
916 except ( ValueError, TypeError ):
917 main.log.exception( "Error parsing pending map" )
918 main.log.error( repr( pendingMap ) )
919
920 def CASE4( self, main ):
921 """
922 Ping across added host intents
923 """
924 import json
925 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700926 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700927 assert main, "main not defined"
928 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700929 assert main.CLIs, "main.CLIs not defined"
930 assert main.nodes, "main.nodes not defined"
Jon Halla440e872016-03-31 15:15:50 -0700931 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700932 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700933 "functionality and check the state of " +\
934 "the intent"
Jon Hall5cf14d52015-07-16 12:15:19 -0700935
Jon Hall41d39f12016-04-11 22:54:35 -0700936 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700937 main.step( "Check Intent state" )
938 installedCheck = False
939 loopCount = 0
940 while not installedCheck and loopCount < 40:
941 installedCheck = True
942 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700943 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700944 intentStates = []
945 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
946 count = 0
947 # Iter through intents of a node
948 try:
949 for intent in json.loads( intents ):
950 state = intent.get( 'state', None )
951 if "INSTALLED" not in state:
952 installedCheck = False
953 intentId = intent.get( 'id', None )
954 intentStates.append( ( intentId, state ) )
955 except ( ValueError, TypeError ):
956 main.log.exception( "Error parsing intents." )
957 # Print states
958 intentStates.sort()
959 for i, s in intentStates:
960 count += 1
961 main.log.info( "%-6s%-15s%-15s" %
962 ( str( count ), str( i ), str( s ) ) )
963 if not installedCheck:
964 time.sleep( 1 )
965 loopCount += 1
966 utilities.assert_equals( expect=True, actual=installedCheck,
967 onpass="Intents are all INSTALLED",
968 onfail="Intents are not all in " +
969 "INSTALLED state" )
970
Jon Hall9d2dcad2016-04-08 10:15:20 -0700971 main.step( "Ping across added host intents" )
Jon Hall9d2dcad2016-04-08 10:15:20 -0700972 PingResult = main.TRUE
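        # h<i> pings h<i+10> for i in 8..17, the same host pairs that the
        # host-to-host intents were installed for in CASE3.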
973 for i in range( 8, 18 ):
974 ping = main.Mininet1.pingHost( src="h" + str( i ),
975 target="h" + str( i + 10 ) )
976 PingResult = PingResult and ping
977 if ping == main.FALSE:
978 main.log.warn( "Ping failed between h" + str( i ) +
979 " and h" + str( i + 10 ) )
980 elif ping == main.TRUE:
981 main.log.info( "Ping test passed!" )
982 # Don't set PingResult or you'd override failures
983 if PingResult == main.FALSE:
984 main.log.error(
985 "Intents have not been installed correctly, pings failed." )
986 # TODO: pretty print
987 main.log.warn( "ONOS1 intents: " )
988 try:
989 tmpIntents = onosCli.intents()
990 main.log.warn( json.dumps( json.loads( tmpIntents ),
991 sort_keys=True,
992 indent=4,
993 separators=( ',', ': ' ) ) )
994 except ( ValueError, TypeError ):
995 main.log.warn( repr( tmpIntents ) )
996 utilities.assert_equals(
997 expect=main.TRUE,
998 actual=PingResult,
999 onpass="Intents have been installed correctly and pings work",
1000 onfail="Intents have not been installed correctly, pings failed." )
1001
Jon Hall5cf14d52015-07-16 12:15:19 -07001002 main.step( "Check leadership of topics" )
Jon Halla440e872016-03-31 15:15:50 -07001003 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001004 topicCheck = main.TRUE
1005 try:
1006 if leaders:
1007 parsedLeaders = json.loads( leaders )
1008 main.log.warn( json.dumps( parsedLeaders,
1009 sort_keys=True,
1010 indent=4,
1011 separators=( ',', ': ' ) ) )
1012 # check for all intent partitions
1013 # check for election
1014 # TODO: Look at Devices as topics now that it uses this system
1015 topics = []
1016 for i in range( 14 ):
1017 topics.append( "intent-partition-" + str( i ) )
1018 # FIXME: this should only be after we start the app
1019 # FIXME: topics.append( "org.onosproject.election" )
1020 # Print leaders output
1021 main.log.debug( topics )
1022 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1023 for topic in topics:
1024 if topic not in ONOStopics:
1025 main.log.error( "Error: " + topic +
1026 " not in leaders" )
1027 topicCheck = main.FALSE
1028 else:
1029 main.log.error( "leaders() returned None" )
1030 topicCheck = main.FALSE
1031 except ( ValueError, TypeError ):
1032 topicCheck = main.FALSE
1033 main.log.exception( "Error parsing leaders" )
1034 main.log.error( repr( leaders ) )
1035 # TODO: Check for a leader of these topics
1036 # Check all nodes
1037 if topicCheck:
Jon Halla440e872016-03-31 15:15:50 -07001038 for i in main.activeNodes:
1039 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001040 response = node.leaders( jsonFormat=False)
1041 main.log.warn( str( node.name ) + " leaders output: \n" +
1042 str( response ) )
1043
1044 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1045 onpass="intent Partitions is in leaders",
1046 onfail="Some topics were lost " )
1047 # Print partitions
Jon Halla440e872016-03-31 15:15:50 -07001048 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001049 try:
1050 if partitions :
1051 parsedPartitions = json.loads( partitions )
1052 main.log.warn( json.dumps( parsedPartitions,
1053 sort_keys=True,
1054 indent=4,
1055 separators=( ',', ': ' ) ) )
1056 # TODO check for a leader in all partitions
1057 # TODO check for consistency among nodes
1058 else:
1059 main.log.error( "partitions() returned None" )
1060 except ( ValueError, TypeError ):
1061 main.log.exception( "Error parsing partitions" )
1062 main.log.error( repr( partitions ) )
1063 # Print Pending Map
Jon Halla440e872016-03-31 15:15:50 -07001064 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001065 try:
1066 if pendingMap :
1067 parsedPending = json.loads( pendingMap )
1068 main.log.warn( json.dumps( parsedPending,
1069 sort_keys=True,
1070 indent=4,
1071 separators=( ',', ': ' ) ) )
1072 # TODO check something here?
1073 else:
1074 main.log.error( "pendingMap() returned None" )
1075 except ( ValueError, TypeError ):
1076 main.log.exception( "Error parsing pending map" )
1077 main.log.error( repr( pendingMap ) )
1078
1079 if not installedCheck:
1080 main.log.info( "Waiting 60 seconds to see if the state of " +
1081 "intents change" )
1082 time.sleep( 60 )
1083 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -07001084 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001085 intentStates = []
1086 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1087 count = 0
1088 # Iter through intents of a node
1089 try:
1090 for intent in json.loads( intents ):
1091 state = intent.get( 'state', None )
1092 if "INSTALLED" not in state:
1093 installedCheck = False
1094 intentId = intent.get( 'id', None )
1095 intentStates.append( ( intentId, state ) )
1096 except ( ValueError, TypeError ):
1097 main.log.exception( "Error parsing intents." )
1098 intentStates.sort()
1099 for i, s in intentStates:
1100 count += 1
1101 main.log.info( "%-6s%-15s%-15s" %
1102 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001103 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001104 try:
1105 missing = False
1106 if leaders:
1107 parsedLeaders = json.loads( leaders )
1108 main.log.warn( json.dumps( parsedLeaders,
1109 sort_keys=True,
1110 indent=4,
1111 separators=( ',', ': ' ) ) )
1112 # check for all intent partitions
1113 # check for election
1114 topics = []
1115 for i in range( 14 ):
1116 topics.append( "intent-partition-" + str( i ) )
1117 # FIXME: this should only be after we start the app
1118 topics.append( "org.onosproject.election" )
1119 main.log.debug( topics )
1120 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1121 for topic in topics:
1122 if topic not in ONOStopics:
1123 main.log.error( "Error: " + topic +
1124 " not in leaders" )
1125 missing = True
1126 else:
1127 main.log.error( "leaders() returned None" )
1128 except ( ValueError, TypeError ):
1129 main.log.exception( "Error parsing leaders" )
1130 main.log.error( repr( leaders ) )
1131 if missing:
Jon Halla440e872016-03-31 15:15:50 -07001132 for i in main.activeNodes:
1133 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001134 response = node.leaders( jsonFormat=False)
1135 main.log.warn( str( node.name ) + " leaders output: \n" +
1136 str( response ) )
1137
Jon Halla440e872016-03-31 15:15:50 -07001138 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001139 try:
1140 if partitions :
1141 parsedPartitions = json.loads( partitions )
1142 main.log.warn( json.dumps( parsedPartitions,
1143 sort_keys=True,
1144 indent=4,
1145 separators=( ',', ': ' ) ) )
1146 # TODO check for a leader in all partitions
1147 # TODO check for consistency among nodes
1148 else:
1149 main.log.error( "partitions() returned None" )
1150 except ( ValueError, TypeError ):
1151 main.log.exception( "Error parsing partitions" )
1152 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -07001153 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001154 try:
1155 if pendingMap :
1156 parsedPending = json.loads( pendingMap )
1157 main.log.warn( json.dumps( parsedPending,
1158 sort_keys=True,
1159 indent=4,
1160 separators=( ',', ': ' ) ) )
1161 # TODO check something here?
1162 else:
1163 main.log.error( "pendingMap() returned None" )
1164 except ( ValueError, TypeError ):
1165 main.log.exception( "Error parsing pending map" )
1166 main.log.error( repr( pendingMap ) )
1167 # Print flowrules
Jon Halla440e872016-03-31 15:15:50 -07001168 node = main.activeNodes[0]
1169 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001170 main.step( "Wait a minute then ping again" )
1171 # the wait is above
1172 PingResult = main.TRUE
1173 for i in range( 8, 18 ):
1174 ping = main.Mininet1.pingHost( src="h" + str( i ),
1175 target="h" + str( i + 10 ) )
1176 PingResult = PingResult and ping
1177 if ping == main.FALSE:
1178 main.log.warn( "Ping failed between h" + str( i ) +
1179 " and h" + str( i + 10 ) )
1180 elif ping == main.TRUE:
1181 main.log.info( "Ping test passed!" )
1182 # Don't set PingResult or you'd override failures
1183 if PingResult == main.FALSE:
1184 main.log.error(
1185 "Intents have not been installed correctly, pings failed." )
1186 # TODO: pretty print
1187 main.log.warn( "ONOS1 intents: " )
1188 try:
Jon Halla440e872016-03-31 15:15:50 -07001189 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001190 main.log.warn( json.dumps( json.loads( tmpIntents ),
1191 sort_keys=True,
1192 indent=4,
1193 separators=( ',', ': ' ) ) )
1194 except ( ValueError, TypeError ):
1195 main.log.warn( repr( tmpIntents ) )
1196 utilities.assert_equals(
1197 expect=main.TRUE,
1198 actual=PingResult,
1199 onpass="Intents have been installed correctly and pings work",
1200 onfail="Intents have not been installed correctly, pings failed." )
1201
1202 def CASE5( self, main ):
1203 """
1204 Reading state of ONOS
1205 """
1206 import json
1207 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001208 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001209 assert main, "main not defined"
1210 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001211 assert main.CLIs, "main.CLIs not defined"
1212 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001213
1214 main.case( "Setting up and gathering data for current state" )
1215 # The general idea for this test case is to pull the state of
1216 # ( intents,flows, topology,... ) from each ONOS node
1217 # We can then compare them with each other and also with past states
1218
1219 main.step( "Check that each switch has a master" )
1220 global mastershipState
1221 mastershipState = '[]'
1222
1223 # Assert that each device has a master
1224 rolesNotNull = main.TRUE
1225 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001226 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001227 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001228 name="rolesNotNull-" + str( i ),
1229 args=[] )
1230 threads.append( t )
1231 t.start()
1232
1233 for t in threads:
1234 t.join()
1235 rolesNotNull = rolesNotNull and t.result
1236 utilities.assert_equals(
1237 expect=main.TRUE,
1238 actual=rolesNotNull,
1239 onpass="Each device has a master",
1240 onfail="Some devices don't have a master assigned" )
1241
1242 main.step( "Get the Mastership of each switch from each controller" )
1243 ONOSMastership = []
1244 mastershipCheck = main.FALSE
1245 consistentMastership = True
1246 rolesResults = True
1247 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001248 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001249 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001250 name="roles-" + str( i ),
1251 args=[] )
1252 threads.append( t )
1253 t.start()
1254
1255 for t in threads:
1256 t.join()
1257 ONOSMastership.append( t.result )
1258
Jon Halla440e872016-03-31 15:15:50 -07001259 for i in range( len( ONOSMastership ) ):
1260 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001261 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001262 main.log.error( "Error in getting ONOS" + node + " roles" )
1263 main.log.warn( "ONOS" + node + " mastership response: " +
1264 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001265 rolesResults = False
1266 utilities.assert_equals(
1267 expect=True,
1268 actual=rolesResults,
1269 onpass="No error in reading roles output",
1270 onfail="Error in reading roles from ONOS" )
1271
1272 main.step( "Check for consistency in roles from each controller" )
1273 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1274 main.log.info(
1275 "Switch roles are consistent across all ONOS nodes" )
1276 else:
1277 consistentMastership = False
1278 utilities.assert_equals(
1279 expect=True,
1280 actual=consistentMastership,
1281 onpass="Switch roles are consistent across all ONOS nodes",
1282 onfail="ONOS nodes have different views of switch roles" )
1283
1284 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001285 for i in range( len( main.activeNodes ) ):
1286 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001287 try:
1288 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001289 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001290 json.dumps(
1291 json.loads( ONOSMastership[ i ] ),
1292 sort_keys=True,
1293 indent=4,
1294 separators=( ',', ': ' ) ) )
1295 except ( ValueError, TypeError ):
1296 main.log.warn( repr( ONOSMastership[ i ] ) )
1297 elif rolesResults and consistentMastership:
1298 mastershipCheck = main.TRUE
1299 mastershipState = ONOSMastership[ 0 ]
1300
1301 main.step( "Get the intents from each controller" )
1302 global intentState
1303 intentState = []
1304 ONOSIntents = []
1305 intentCheck = main.FALSE
1306 consistentIntents = True
1307 intentsResults = True
1308 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001309 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001310 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001311 name="intents-" + str( i ),
1312 args=[],
1313 kwargs={ 'jsonFormat': True } )
1314 threads.append( t )
1315 t.start()
1316
1317 for t in threads:
1318 t.join()
1319 ONOSIntents.append( t.result )
1320
Jon Halla440e872016-03-31 15:15:50 -07001321 for i in range( len( ONOSIntents ) ):
1322 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001323 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001324 main.log.error( "Error in getting ONOS" + node + " intents" )
1325 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001326 repr( ONOSIntents[ i ] ) )
1327 intentsResults = False
1328 utilities.assert_equals(
1329 expect=True,
1330 actual=intentsResults,
1331 onpass="No error in reading intents output",
1332 onfail="Error in reading intents from ONOS" )
1333
1334 main.step( "Check for consistency in Intents from each controller" )
1335 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1336 main.log.info( "Intents are consistent across all ONOS " +
1337 "nodes" )
1338 else:
1339 consistentIntents = False
1340 main.log.error( "Intents not consistent" )
1341 utilities.assert_equals(
1342 expect=True,
1343 actual=consistentIntents,
1344 onpass="Intents are consistent across all ONOS nodes",
1345 onfail="ONOS nodes have different views of intents" )
1346
1347 if intentsResults:
1348 # Try to make it easy to figure out what is happening
1349 #
1350 # Intent ONOS1 ONOS2 ...
1351 # 0x01 INSTALLED INSTALLING
1352 # ... ... ...
1353 # ... ... ...
1354 title = " Id"
Jon Halla440e872016-03-31 15:15:50 -07001355 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001356 title += " " * 10 + "ONOS" + str( n + 1 )
1357 main.log.warn( title )
Jon Halle1a3b752015-07-22 13:02:46 -07001358 # get all intent keys in the cluster
Jon Hall5cf14d52015-07-16 12:15:19 -07001359 keys = []
1360 try:
1361 # Get the set of all intent keys
1362 for nodeStr in ONOSIntents:
1363 node = json.loads( nodeStr )
1364 for intent in node:
1365 keys.append( intent.get( 'id' ) )
1366 keys = set( keys )
1367 # For each intent key, print the state on each node
1368 for key in keys:
1369 row = "%-13s" % key
1370 for nodeStr in ONOSIntents:
1371 node = json.loads( nodeStr )
1372 for intent in node:
1373 if intent.get( 'id', "Error" ) == key:
1374 row += "%-15s" % intent.get( 'state' )
1375 main.log.warn( row )
1376 # End of intent state table
1377 except ValueError as e:
1378 main.log.exception( e )
1379 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1380
1381 if intentsResults and not consistentIntents:
1382 # print the json objects
Jon Halla440e872016-03-31 15:15:50 -07001383 n = str( main.activeNodes[-1] + 1 )
1384 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001385 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1386 sort_keys=True,
1387 indent=4,
1388 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001389 for i in range( len( ONOSIntents ) ):
1390 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001391 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07001392 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001393 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1394 sort_keys=True,
1395 indent=4,
1396 separators=( ',', ': ' ) ) )
1397 else:
Jon Halla440e872016-03-31 15:15:50 -07001398 main.log.debug( "ONOS" + node + " intents match ONOS" +
1399 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001400 elif intentsResults and consistentIntents:
1401 intentCheck = main.TRUE
1402 intentState = ONOSIntents[ 0 ]
1403
1404 main.step( "Get the flows from each controller" )
1405 global flowState
1406 flowState = []
1407 ONOSFlows = []
1408 ONOSFlowsJson = []
1409 flowCheck = main.FALSE
1410 consistentFlows = True
1411 flowsResults = True
1412 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001413 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001414 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001415 name="flows-" + str( i ),
1416 args=[],
1417 kwargs={ 'jsonFormat': True } )
1418 threads.append( t )
1419 t.start()
1420
1421 # NOTE: Flows command can take some time to run
1422 time.sleep( 30 )
1423 for t in threads:
1424 t.join()
1425 result = t.result
1426 ONOSFlows.append( result )
1427
Jon Halla440e872016-03-31 15:15:50 -07001428 for i in range( len( ONOSFlows ) ):
1429 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001430 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1431 main.log.error( "Error in getting ONOS" + num + " flows" )
1432 main.log.warn( "ONOS" + num + " flows response: " +
1433 repr( ONOSFlows[ i ] ) )
1434 flowsResults = False
1435 ONOSFlowsJson.append( None )
1436 else:
1437 try:
1438 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1439 except ( ValueError, TypeError ):
1440 # FIXME: change this to log.error?
1441 main.log.exception( "Error in parsing ONOS" + num +
1442 " response as json." )
1443 main.log.error( repr( ONOSFlows[ i ] ) )
1444 ONOSFlowsJson.append( None )
1445 flowsResults = False
1446 utilities.assert_equals(
1447 expect=True,
1448 actual=flowsResults,
1449 onpass="No error in reading flows output",
1450 onfail="Error in reading flows from ONOS" )
1451
1452 main.step( "Check for consistency in Flows from each controller" )
1453 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1454 if all( tmp ):
1455 main.log.info( "Flow count is consistent across all ONOS nodes" )
1456 else:
1457 consistentFlows = False
1458 utilities.assert_equals(
1459 expect=True,
1460 actual=consistentFlows,
1461 onpass="The flow count is consistent across all ONOS nodes",
1462 onfail="ONOS nodes have different flow counts" )
1463
1464 if flowsResults and not consistentFlows:
Jon Halla440e872016-03-31 15:15:50 -07001465 for i in range( len( ONOSFlows ) ):
1466 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001467 try:
1468 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001469 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001470 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1471 indent=4, separators=( ',', ': ' ) ) )
1472 except ( ValueError, TypeError ):
Jon Halla440e872016-03-31 15:15:50 -07001473 main.log.warn( "ONOS" + node + " flows: " +
1474 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001475 elif flowsResults and consistentFlows:
1476 flowCheck = main.TRUE
1477 flowState = ONOSFlows[ 0 ]
1478
1479 main.step( "Get the OF Table entries" )
1480 global flows
1481 flows = []
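        # Snapshot the OF flow tables of all 28 switches; CASE7 compares the
        # tables gathered after the (non-)failure against this baseline.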
1482 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001483 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001484 if flowCheck == main.FALSE:
1485 for table in flows:
1486 main.log.warn( table )
1487 # TODO: Compare switch flow tables with ONOS flow tables
1488
1489 main.step( "Start continuous pings" )
1490 main.Mininet2.pingLong(
1491 src=main.params[ 'PING' ][ 'source1' ],
1492 target=main.params[ 'PING' ][ 'target1' ],
1493 pingTime=500 )
1494 main.Mininet2.pingLong(
1495 src=main.params[ 'PING' ][ 'source2' ],
1496 target=main.params[ 'PING' ][ 'target2' ],
1497 pingTime=500 )
1498 main.Mininet2.pingLong(
1499 src=main.params[ 'PING' ][ 'source3' ],
1500 target=main.params[ 'PING' ][ 'target3' ],
1501 pingTime=500 )
1502 main.Mininet2.pingLong(
1503 src=main.params[ 'PING' ][ 'source4' ],
1504 target=main.params[ 'PING' ][ 'target4' ],
1505 pingTime=500 )
1506 main.Mininet2.pingLong(
1507 src=main.params[ 'PING' ][ 'source5' ],
1508 target=main.params[ 'PING' ][ 'target5' ],
1509 pingTime=500 )
1510 main.Mininet2.pingLong(
1511 src=main.params[ 'PING' ][ 'source6' ],
1512 target=main.params[ 'PING' ][ 'target6' ],
1513 pingTime=500 )
1514 main.Mininet2.pingLong(
1515 src=main.params[ 'PING' ][ 'source7' ],
1516 target=main.params[ 'PING' ][ 'target7' ],
1517 pingTime=500 )
1518 main.Mininet2.pingLong(
1519 src=main.params[ 'PING' ][ 'source8' ],
1520 target=main.params[ 'PING' ][ 'target8' ],
1521 pingTime=500 )
1522 main.Mininet2.pingLong(
1523 src=main.params[ 'PING' ][ 'source9' ],
1524 target=main.params[ 'PING' ][ 'target9' ],
1525 pingTime=500 )
1526 main.Mininet2.pingLong(
1527 src=main.params[ 'PING' ][ 'source10' ],
1528 target=main.params[ 'PING' ][ 'target10' ],
1529 pingTime=500 )
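        # These background pings keep running across the failure case and are
        # killed again in CASE7 (the packet-loss check there is currently
        # commented out).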
1530
1531 main.step( "Collecting topology information from ONOS" )
1532 devices = []
1533 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001534 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001535 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001536 name="devices-" + str( i ),
1537 args=[ ] )
1538 threads.append( t )
1539 t.start()
1540
1541 for t in threads:
1542 t.join()
1543 devices.append( t.result )
1544 hosts = []
1545 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001546 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001547 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001548 name="hosts-" + str( i ),
1549 args=[ ] )
1550 threads.append( t )
1551 t.start()
1552
1553 for t in threads:
1554 t.join()
1555 try:
1556 hosts.append( json.loads( t.result ) )
1557 except ( ValueError, TypeError ):
1558 # FIXME: better handling of this, print which node
1559 # Maybe use thread name?
1560 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001561 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001562 hosts.append( None )
1563
1564 ports = []
1565 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001566 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001567 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001568 name="ports-" + str( i ),
1569 args=[ ] )
1570 threads.append( t )
1571 t.start()
1572
1573 for t in threads:
1574 t.join()
1575 ports.append( t.result )
1576 links = []
1577 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001578 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001579 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001580 name="links-" + str( i ),
1581 args=[ ] )
1582 threads.append( t )
1583 t.start()
1584
1585 for t in threads:
1586 t.join()
1587 links.append( t.result )
1588 clusters = []
1589 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001590 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001591 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001592 name="clusters-" + str( i ),
1593 args=[ ] )
1594 threads.append( t )
1595 t.start()
1596
1597 for t in threads:
1598 t.join()
1599 clusters.append( t.result )
1600 # Compare json objects for hosts and dataplane clusters
1601
1602 # hosts
1603 main.step( "Host view is consistent across ONOS nodes" )
1604 consistentHostsResult = main.TRUE
1605 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001606 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001607 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001608 if hosts[ controller ] == hosts[ 0 ]:
1609 continue
1610 else: # hosts not consistent
1611 main.log.error( "hosts from ONOS" +
1612 controllerStr +
1613 " is inconsistent with ONOS1" )
1614 main.log.warn( repr( hosts[ controller ] ) )
1615 consistentHostsResult = main.FALSE
1616
1617 else:
1618 main.log.error( "Error in getting ONOS hosts from ONOS" +
1619 controllerStr )
1620 consistentHostsResult = main.FALSE
1621 main.log.warn( "ONOS" + controllerStr +
1622 " hosts response: " +
1623 repr( hosts[ controller ] ) )
1624 utilities.assert_equals(
1625 expect=main.TRUE,
1626 actual=consistentHostsResult,
1627 onpass="Hosts view is consistent across all ONOS nodes",
1628 onfail="ONOS nodes have different views of hosts" )
1629
1630 main.step( "Each host has an IP address" )
1631 ipResult = main.TRUE
1632 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001633 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001634 if hosts[ controller ]:
1635 for host in hosts[ controller ]:
1636 if not host.get( 'ipAddresses', [ ] ):
1637 main.log.error( "Error with host ips on controller" +
1638 controllerStr + ": " + str( host ) )
1639 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001640 utilities.assert_equals(
1641 expect=main.TRUE,
1642 actual=ipResult,
1643 onpass="The ips of the hosts aren't empty",
1644 onfail="The ip of at least one host is missing" )
1645
1646 # Strongly connected clusters of devices
1647 main.step( "Cluster view is consistent across ONOS nodes" )
1648 consistentClustersResult = main.TRUE
1649 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07001650 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001651 if "Error" not in clusters[ controller ]:
1652 if clusters[ controller ] == clusters[ 0 ]:
1653 continue
1654 else: # clusters not consistent
1655 main.log.error( "clusters from ONOS" + controllerStr +
1656 " is inconsistent with ONOS1" )
1657 consistentClustersResult = main.FALSE
1658
1659 else:
1660 main.log.error( "Error in getting dataplane clusters " +
1661 "from ONOS" + controllerStr )
1662 consistentClustersResult = main.FALSE
1663 main.log.warn( "ONOS" + controllerStr +
1664 " clusters response: " +
1665 repr( clusters[ controller ] ) )
1666 utilities.assert_equals(
1667 expect=main.TRUE,
1668 actual=consistentClustersResult,
1669 onpass="Clusters view is consistent across all ONOS nodes",
1670 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001671 if consistentClustersResult != main.TRUE:
1672 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07001673 # there should always only be one cluster
1674 main.step( "Cluster view correct across ONOS nodes" )
1675 try:
1676 numClusters = len( json.loads( clusters[ 0 ] ) )
1677 except ( ValueError, TypeError ):
1678 main.log.exception( "Error parsing clusters[0]: " +
1679 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001680 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001681 clusterResults = main.FALSE
1682 if numClusters == 1:
1683 clusterResults = main.TRUE
1684 utilities.assert_equals(
1685 expect=1,
1686 actual=numClusters,
1687 onpass="ONOS shows 1 SCC",
1688 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1689
1690 main.step( "Comparing ONOS topology to MN" )
1691 devicesResults = main.TRUE
1692 linksResults = main.TRUE
1693 hostsResults = main.TRUE
1694 mnSwitches = main.Mininet1.getSwitches()
1695 mnLinks = main.Mininet1.getLinks()
1696 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07001697 for controller in range( len( main.activeNodes ) ):
1698 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001699 if devices[ controller ] and ports[ controller ] and\
1700 "Error" not in devices[ controller ] and\
1701 "Error" not in ports[ controller ]:
Jon Halla440e872016-03-31 15:15:50 -07001702 currentDevicesResult = main.Mininet1.compareSwitches(
1703 mnSwitches,
1704 json.loads( devices[ controller ] ),
1705 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001706 else:
1707 currentDevicesResult = main.FALSE
1708 utilities.assert_equals( expect=main.TRUE,
1709 actual=currentDevicesResult,
1710 onpass="ONOS" + controllerStr +
1711 " Switches view is correct",
1712 onfail="ONOS" + controllerStr +
1713 " Switches view is incorrect" )
1714 if links[ controller ] and "Error" not in links[ controller ]:
1715 currentLinksResult = main.Mininet1.compareLinks(
1716 mnSwitches, mnLinks,
1717 json.loads( links[ controller ] ) )
1718 else:
1719 currentLinksResult = main.FALSE
1720 utilities.assert_equals( expect=main.TRUE,
1721 actual=currentLinksResult,
1722 onpass="ONOS" + controllerStr +
1723 " links view is correct",
1724 onfail="ONOS" + controllerStr +
1725 " links view is incorrect" )
1726
Jon Hall657cdf62015-12-17 14:40:51 -08001727 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001728 currentHostsResult = main.Mininet1.compareHosts(
1729 mnHosts,
1730 hosts[ controller ] )
1731 else:
1732 currentHostsResult = main.FALSE
1733 utilities.assert_equals( expect=main.TRUE,
1734 actual=currentHostsResult,
1735 onpass="ONOS" + controllerStr +
1736 " hosts exist in Mininet",
1737 onfail="ONOS" + controllerStr +
1738 " hosts don't match Mininet" )
1739
1740 devicesResults = devicesResults and currentDevicesResult
1741 linksResults = linksResults and currentLinksResult
1742 hostsResults = hostsResults and currentHostsResult
1743
1744 main.step( "Device information is correct" )
1745 utilities.assert_equals(
1746 expect=main.TRUE,
1747 actual=devicesResults,
1748 onpass="Device information is correct",
1749 onfail="Device information is incorrect" )
1750
1751 main.step( "Links are correct" )
1752 utilities.assert_equals(
1753 expect=main.TRUE,
1754 actual=linksResults,
1755 onpass="Link are correct",
1756 onfail="Links are incorrect" )
1757
1758 main.step( "Hosts are correct" )
1759 utilities.assert_equals(
1760 expect=main.TRUE,
1761 actual=hostsResults,
1762 onpass="Hosts are correct",
1763 onfail="Hosts are incorrect" )
1764
1765 def CASE6( self, main ):
1766 """
1767 The Failure case. Since this is the Sanity test, we do nothing.
1768 """
1769 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001770 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001771 assert main, "main not defined"
1772 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001773 assert main.CLIs, "main.CLIs not defined"
1774 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001775 main.case( "Wait 60 seconds instead of inducing a failure" )
1776 time.sleep( 60 )
1777 utilities.assert_equals(
1778 expect=main.TRUE,
1779 actual=main.TRUE,
1780 onpass="Sleeping 60 seconds",
1781 onfail="Something is terribly wrong with my math" )
1782
1783 def CASE7( self, main ):
1784 """
1785 Check state after ONOS failure
1786 """
1787 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001788 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001789 assert main, "main not defined"
1790 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001791 assert main.CLIs, "main.CLIs not defined"
1792 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001793 main.case( "Running ONOS Constant State Tests" )
1794
1795 main.step( "Check that each switch has a master" )
1796 # Assert that each device has a master
1797 rolesNotNull = main.TRUE
1798 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001799 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001800 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 name="rolesNotNull-" + str( i ),
1802 args=[ ] )
1803 threads.append( t )
1804 t.start()
1805
1806 for t in threads:
1807 t.join()
1808 rolesNotNull = rolesNotNull and t.result
1809 utilities.assert_equals(
1810 expect=main.TRUE,
1811 actual=rolesNotNull,
1812 onpass="Each device has a master",
1813 onfail="Some devices don't have a master assigned" )
1814
1815 main.step( "Read device roles from ONOS" )
1816 ONOSMastership = []
1817 mastershipCheck = main.FALSE
1818 consistentMastership = True
1819 rolesResults = True
1820 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001821 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001822 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001823 name="roles-" + str( i ),
1824 args=[] )
1825 threads.append( t )
1826 t.start()
1827
1828 for t in threads:
1829 t.join()
1830 ONOSMastership.append( t.result )
1831
Jon Halla440e872016-03-31 15:15:50 -07001832 for i in range( len( ONOSMastership ) ):
1833 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001834 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001835 main.log.error( "Error in getting ONOS" + node + " roles" )
1836 main.log.warn( "ONOS" + node + " mastership response: " +
1837 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001838 rolesResults = False
1839 utilities.assert_equals(
1840 expect=True,
1841 actual=rolesResults,
1842 onpass="No error in reading roles output",
1843 onfail="Error in reading roles from ONOS" )
1844
1845 main.step( "Check for consistency in roles from each controller" )
1846 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1847 main.log.info(
1848 "Switch roles are consistent across all ONOS nodes" )
1849 else:
1850 consistentMastership = False
1851 utilities.assert_equals(
1852 expect=True,
1853 actual=consistentMastership,
1854 onpass="Switch roles are consistent across all ONOS nodes",
1855 onfail="ONOS nodes have different views of switch roles" )
1856
1857 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001858 for i in range( len( ONOSMastership ) ):
1859 node = str( main.activeNodes[i] + 1 )
1860 main.log.warn( "ONOS" + node + " roles: ",
1861 json.dumps( json.loads( ONOSMastership[ i ] ),
1862 sort_keys=True,
1863 indent=4,
1864 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001865
1866 description2 = "Compare switch roles from before failure"
1867 main.step( description2 )
1868 try:
1869 currentJson = json.loads( ONOSMastership[0] )
1870 oldJson = json.loads( mastershipState )
1871 except ( ValueError, TypeError ):
1872 main.log.exception( "Something is wrong with parsing " +
1873 "ONOSMastership[0] or mastershipState" )
1874 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1875 main.log.error( "mastershipState" + repr( mastershipState ) )
1876 main.cleanup()
1877 main.exit()
1878 mastershipCheck = main.TRUE
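        # Compare the current master of each switch against the mastership
        # snapshot saved in CASE5 ( mastershipState ).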
1879 for i in range( 1, 29 ):
1880 switchDPID = str(
1881 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1882 current = [ switch[ 'master' ] for switch in currentJson
1883 if switchDPID in switch[ 'id' ] ]
1884 old = [ switch[ 'master' ] for switch in oldJson
1885 if switchDPID in switch[ 'id' ] ]
1886 if current == old:
1887 mastershipCheck = mastershipCheck and main.TRUE
1888 else:
1889 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1890 mastershipCheck = main.FALSE
1891 utilities.assert_equals(
1892 expect=main.TRUE,
1893 actual=mastershipCheck,
1894 onpass="Mastership of Switches was not changed",
1895 onfail="Mastership of some switches changed" )
1896 mastershipCheck = mastershipCheck and consistentMastership
1897
1898 main.step( "Get the intents and compare across all nodes" )
1899 ONOSIntents = []
1900 intentCheck = main.FALSE
1901 consistentIntents = True
1902 intentsResults = True
1903 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001904 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001905 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001906 name="intents-" + str( i ),
1907 args=[],
1908 kwargs={ 'jsonFormat': True } )
1909 threads.append( t )
1910 t.start()
1911
1912 for t in threads:
1913 t.join()
1914 ONOSIntents.append( t.result )
1915
Jon Halla440e872016-03-31 15:15:50 -07001916 for i in range( len( ONOSIntents ) ):
1917 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001918 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001919 main.log.error( "Error in getting ONOS" + node + " intents" )
1920 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001921 repr( ONOSIntents[ i ] ) )
1922 intentsResults = False
1923 utilities.assert_equals(
1924 expect=True,
1925 actual=intentsResults,
1926 onpass="No error in reading intents output",
1927 onfail="Error in reading intents from ONOS" )
1928
1929 main.step( "Check for consistency in Intents from each controller" )
1930 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1931 main.log.info( "Intents are consistent across all ONOS " +
1932 "nodes" )
1933 else:
1934 consistentIntents = False
1935
1936 # Try to make it easy to figure out what is happening
1937 #
1938 # Intent ONOS1 ONOS2 ...
1939 # 0x01 INSTALLED INSTALLING
1940 # ... ... ...
1941 # ... ... ...
1942 title = " ID"
Jon Halla440e872016-03-31 15:15:50 -07001943 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001944 title += " " * 10 + "ONOS" + str( n + 1 )
1945 main.log.warn( title )
1946 # get all intent keys in the cluster
1947 keys = []
1948 for nodeStr in ONOSIntents:
1949 node = json.loads( nodeStr )
1950 for intent in node:
1951 keys.append( intent.get( 'id' ) )
1952 keys = set( keys )
1953 for key in keys:
1954 row = "%-13s" % key
1955 for nodeStr in ONOSIntents:
1956 node = json.loads( nodeStr )
1957 for intent in node:
1958 if intent.get( 'id' ) == key:
1959 row += "%-15s" % intent.get( 'state' )
1960 main.log.warn( row )
1961 # End table view
1962
1963 utilities.assert_equals(
1964 expect=True,
1965 actual=consistentIntents,
1966 onpass="Intents are consistent across all ONOS nodes",
1967 onfail="ONOS nodes have different views of intents" )
1968 intentStates = []
1969 for node in ONOSIntents: # Iter through ONOS nodes
1970 nodeStates = []
1971 # Iter through intents of a node
1972 try:
1973 for intent in json.loads( node ):
1974 nodeStates.append( intent[ 'state' ] )
1975 except ( ValueError, TypeError ):
1976 main.log.exception( "Error in parsing intents" )
1977 main.log.error( repr( node ) )
1978 intentStates.append( nodeStates )
1979 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1980 main.log.info( dict( out ) )
1981
1982 if intentsResults and not consistentIntents:
Jon Halla440e872016-03-31 15:15:50 -07001983 for i in range( len( main.activeNodes ) ):
1984 node = str( main.activeNodes[i] + 1 )
1985 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001986 main.log.warn( json.dumps(
1987 json.loads( ONOSIntents[ i ] ),
1988 sort_keys=True,
1989 indent=4,
1990 separators=( ',', ': ' ) ) )
1991 elif intentsResults and consistentIntents:
1992 intentCheck = main.TRUE
1993
1994 # NOTE: Store has no durability, so intents are lost across system
1995 # restarts
1996 main.step( "Compare current intents with intents before the failure" )
1997 # NOTE: this requires case 5 to pass for intentState to be set.
1998 # maybe we should stop the test if that fails?
1999 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002000 try:
2001 intentState
2002 except NameError:
2003 main.log.warn( "No previous intent state was saved" )
2004 else:
2005 if intentState and intentState == ONOSIntents[ 0 ]:
2006 sameIntents = main.TRUE
2007 main.log.info( "Intents are consistent with before failure" )
2008 # TODO: possibly the states have changed? we may need to figure out
2009 # what the acceptable states are
2010 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2011 sameIntents = main.TRUE
2012 try:
2013 before = json.loads( intentState )
2014 after = json.loads( ONOSIntents[ 0 ] )
2015 for intent in before:
2016 if intent not in after:
2017 sameIntents = main.FALSE
2018 main.log.debug( "Intent is not currently in ONOS " +
2019 "(at least in the same form):" )
2020 main.log.debug( json.dumps( intent ) )
2021 except ( ValueError, TypeError ):
2022 main.log.exception( "Exception printing intents" )
2023 main.log.debug( repr( ONOSIntents[0] ) )
2024 main.log.debug( repr( intentState ) )
2025 if sameIntents == main.FALSE:
2026 try:
2027 main.log.debug( "ONOS intents before: " )
2028 main.log.debug( json.dumps( json.loads( intentState ),
2029 sort_keys=True, indent=4,
2030 separators=( ',', ': ' ) ) )
2031 main.log.debug( "Current ONOS intents: " )
2032 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2033 sort_keys=True, indent=4,
2034 separators=( ',', ': ' ) ) )
2035 except ( ValueError, TypeError ):
2036 main.log.exception( "Exception printing intents" )
2037 main.log.debug( repr( ONOSIntents[0] ) )
2038 main.log.debug( repr( intentState ) )
2039 utilities.assert_equals(
2040 expect=main.TRUE,
2041 actual=sameIntents,
2042 onpass="Intents are consistent with before failure",
2043 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002044 intentCheck = intentCheck and sameIntents
2045
2046 main.step( "Get the OF Table entries and compare to before " +
2047 "component failure" )
2048 FlowTables = main.TRUE
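        # Compare each switch's current flow table against the snapshot
        # collected in CASE5 ( the global 'flows' list ).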
Jon Hall5cf14d52015-07-16 12:15:19 -07002049 for i in range( 28 ):
2050 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002051 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002052 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2053 FlowTables = FlowTables and curSwitch
2054 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002055 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002056 utilities.assert_equals(
2057 expect=main.TRUE,
2058 actual=FlowTables,
2059 onpass="No changes were found in the flow tables",
2060 onfail="Changes were found in the flow tables" )
2061
2062 main.Mininet2.pingLongKill()
2063 '''
2064 main.step( "Check the continuous pings to ensure that no packets " +
2065 "were dropped during component failure" )
2066 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2067 main.params[ 'TESTONIP' ] )
2068 LossInPings = main.FALSE
2069 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2070 for i in range( 8, 18 ):
2071 main.log.info(
2072 "Checking for a loss in pings along flow from s" +
2073 str( i ) )
2074 LossInPings = main.Mininet2.checkForLoss(
2075 "/tmp/ping.h" +
2076 str( i ) ) or LossInPings
2077 if LossInPings == main.TRUE:
2078 main.log.info( "Loss in ping detected" )
2079 elif LossInPings == main.ERROR:
2080 main.log.info( "There are multiple mininet process running" )
2081 elif LossInPings == main.FALSE:
2082 main.log.info( "No Loss in the pings" )
2083 main.log.info( "No loss of dataplane connectivity" )
2084 utilities.assert_equals(
2085 expect=main.FALSE,
2086 actual=LossInPings,
2087 onpass="No Loss of connectivity",
2088 onfail="Loss of dataplane connectivity detected" )
2089 '''
2090
2091 main.step( "Leadership Election is still functional" )
2092 # Test of LeadershipElection
Jon Halla440e872016-03-31 15:15:50 -07002093 leaderList = []
2094
Jon Hall5cf14d52015-07-16 12:15:19 -07002095 # NOTE: this only works for the sanity test. In case of failures,
2096 # leader will likely change
Jon Halla440e872016-03-31 15:15:50 -07002097 leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
Jon Hall5cf14d52015-07-16 12:15:19 -07002098 leaderResult = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07002099
2100 for i in main.activeNodes:
2101 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002102 leaderN = cli.electionTestLeader()
Jon Halla440e872016-03-31 15:15:50 -07002103 leaderList.append( leaderN )
Jon Hall5cf14d52015-07-16 12:15:19 -07002104 # verify leader is ONOS1
2105 if leaderN == leader:
2106 # all is well
2107 # NOTE: In failure scenario, this could be a new node, maybe
2108 # check != ONOS1
2109 pass
2110 elif leaderN == main.FALSE:
2111 # error in response
2112 main.log.error( "Something is wrong with " +
2113 "electionTestLeader function, check the" +
2114 " error logs" )
2115 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002116 elif leaderN is None:
2117 main.log.error( cli.name +
2118 " shows no leader for the election-app was" +
2119 " elected after the old one died" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002120 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002121 if len( set( leaderList ) ) != 1:
2122 leaderResult = main.FALSE
2123 main.log.error(
2124 "Inconsistent view of leader for the election test app" )
2125 # TODO: print the list
Jon Hall5cf14d52015-07-16 12:15:19 -07002126 utilities.assert_equals(
2127 expect=main.TRUE,
2128 actual=leaderResult,
2129 onpass="Leadership election passed",
2130 onfail="Something went wrong with Leadership election" )
2131
2132 def CASE8( self, main ):
2133 """
2134 Compare topo
2135 """
2136 import json
2137 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002138 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002139 assert main, "main not defined"
2140 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002141 assert main.CLIs, "main.CLIs not defined"
2142 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002143
2144 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002145 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002146 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002147 topoResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002148 topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002149 elapsed = 0
2150 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002151 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 startTime = time.time()
2153 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002154 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002155 devicesResults = main.TRUE
2156 linksResults = main.TRUE
2157 hostsResults = main.TRUE
2158 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002159 count += 1
2160 cliStart = time.time()
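            # Each topology query below is retried ( up to 5 attempts with a
            # 5 second sleep ) since the CLI calls may return None before the
            # topology has converged.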
2161 devices = []
2162 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002163 for i in main.activeNodes:
2164 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002165 name="devices-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002166 args=[ main.CLIs[i].devices, [ None ] ],
2167 kwargs= { 'sleep': 5, 'attempts': 5,
2168 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002169 threads.append( t )
2170 t.start()
2171
2172 for t in threads:
2173 t.join()
2174 devices.append( t.result )
2175 hosts = []
2176 ipResult = main.TRUE
2177 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002178 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002179 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002180 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002181 args=[ main.CLIs[i].hosts, [ None ] ],
2182 kwargs= { 'sleep': 5, 'attempts': 5,
2183 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002184 threads.append( t )
2185 t.start()
2186
2187 for t in threads:
2188 t.join()
2189 try:
2190 hosts.append( json.loads( t.result ) )
2191 except ( ValueError, TypeError ):
2192 main.log.exception( "Error parsing hosts results" )
2193 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002194 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002195 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002196 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002197 if hosts[ controller ]:
2198 for host in hosts[ controller ]:
2199 if host is None or host.get( 'ipAddresses', [] ) == []:
2200 main.log.error(
2201 "Error with host ipAddresses on controller" +
2202 controllerStr + ": " + str( host ) )
2203 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002204 ports = []
2205 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002206 for i in main.activeNodes:
2207 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002208 name="ports-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002209 args=[ main.CLIs[i].ports, [ None ] ],
2210 kwargs= { 'sleep': 5, 'attempts': 5,
2211 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002212 threads.append( t )
2213 t.start()
2214
2215 for t in threads:
2216 t.join()
2217 ports.append( t.result )
2218 links = []
2219 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002220 for i in main.activeNodes:
2221 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002222 name="links-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002223 args=[ main.CLIs[i].links, [ None ] ],
2224 kwargs= { 'sleep': 5, 'attempts': 5,
2225 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002226 threads.append( t )
2227 t.start()
2228
2229 for t in threads:
2230 t.join()
2231 links.append( t.result )
2232 clusters = []
2233 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002234 for i in main.activeNodes:
2235 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002236 name="clusters-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002237 args=[ main.CLIs[i].clusters, [ None ] ],
2238 kwargs= { 'sleep': 5, 'attempts': 5,
2239 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002240 threads.append( t )
2241 t.start()
2242
2243 for t in threads:
2244 t.join()
2245 clusters.append( t.result )
2246
2247 elapsed = time.time() - startTime
2248 cliTime = time.time() - cliStart
2249 print "Elapsed time: " + str( elapsed )
2250 print "CLI time: " + str( cliTime )
2251
Jon Halla440e872016-03-31 15:15:50 -07002252 if all( e is None for e in devices ) and\
2253 all( e is None for e in hosts ) and\
2254 all( e is None for e in ports ) and\
2255 all( e is None for e in links ) and\
2256 all( e is None for e in clusters ):
2257 topoFailMsg = "Could not get topology from ONOS"
2258 main.log.error( topoFailMsg )
2259 continue # Try again, No use trying to compare
2260
Jon Hall5cf14d52015-07-16 12:15:19 -07002261 mnSwitches = main.Mininet1.getSwitches()
2262 mnLinks = main.Mininet1.getLinks()
2263 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07002264 for controller in range( len( main.activeNodes ) ):
2265 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002266 if devices[ controller ] and ports[ controller ] and\
2267 "Error" not in devices[ controller ] and\
2268 "Error" not in ports[ controller ]:
2269
Jon Hallc6793552016-01-19 14:18:37 -08002270 try:
2271 currentDevicesResult = main.Mininet1.compareSwitches(
2272 mnSwitches,
2273 json.loads( devices[ controller ] ),
2274 json.loads( ports[ controller ] ) )
2275 except ( TypeError, ValueError ) as e:
2276 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2277 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002278 else:
2279 currentDevicesResult = main.FALSE
2280 utilities.assert_equals( expect=main.TRUE,
2281 actual=currentDevicesResult,
2282 onpass="ONOS" + controllerStr +
2283 " Switches view is correct",
2284 onfail="ONOS" + controllerStr +
2285 " Switches view is incorrect" )
2286
2287 if links[ controller ] and "Error" not in links[ controller ]:
2288 currentLinksResult = main.Mininet1.compareLinks(
2289 mnSwitches, mnLinks,
2290 json.loads( links[ controller ] ) )
2291 else:
2292 currentLinksResult = main.FALSE
2293 utilities.assert_equals( expect=main.TRUE,
2294 actual=currentLinksResult,
2295 onpass="ONOS" + controllerStr +
2296 " links view is correct",
2297 onfail="ONOS" + controllerStr +
2298 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002299 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002300 currentHostsResult = main.Mininet1.compareHosts(
2301 mnHosts,
2302 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002303 elif hosts[ controller ] == []:
2304 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002305 else:
2306 currentHostsResult = main.FALSE
2307 utilities.assert_equals( expect=main.TRUE,
2308 actual=currentHostsResult,
2309 onpass="ONOS" + controllerStr +
2310 " hosts exist in Mininet",
2311 onfail="ONOS" + controllerStr +
2312 " hosts don't match Mininet" )
2313 # CHECKING HOST ATTACHMENT POINTS
2314 hostAttachment = True
2315 zeroHosts = False
2316 # FIXME: topo-HA/obelisk specific mappings:
2317 # key is mac and value is dpid
2318 mappings = {}
2319 for i in range( 1, 29 ): # hosts 1 through 28
2320 # set up correct variables:
2321 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2322 if i == 1:
2323 deviceId = "1000".zfill(16)
2324 elif i == 2:
2325 deviceId = "2000".zfill(16)
2326 elif i == 3:
2327 deviceId = "3000".zfill(16)
2328 elif i == 4:
2329 deviceId = "3004".zfill(16)
2330 elif i == 5:
2331 deviceId = "5000".zfill(16)
2332 elif i == 6:
2333 deviceId = "6000".zfill(16)
2334 elif i == 7:
2335 deviceId = "6007".zfill(16)
2336 elif i >= 8 and i <= 17:
2337 dpid = '3' + str( i ).zfill( 3 )
2338 deviceId = dpid.zfill(16)
2339 elif i >= 18 and i <= 27:
2340 dpid = '6' + str( i ).zfill( 3 )
2341 deviceId = dpid.zfill(16)
2342 elif i == 28:
2343 deviceId = "2800".zfill(16)
2344 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002345 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002346 if hosts[ controller ] == []:
2347 main.log.warn( "There are no hosts discovered" )
2348 zeroHosts = True
2349 else:
2350 for host in hosts[ controller ]:
2351 mac = None
2352 location = None
2353 device = None
2354 port = None
2355 try:
2356 mac = host.get( 'mac' )
2357 assert mac, "mac field could not be found for this host object"
2358
2359 location = host.get( 'location' )
2360 assert location, "location field could not be found for this host object"
2361
2362 # Trim the protocol identifier off deviceId
2363 device = str( location.get( 'elementId' ) ).split(':')[1]
2364 assert device, "elementId field could not be found for this host location object"
2365
2366 port = location.get( 'port' )
2367 assert port, "port field could not be found for this host location object"
2368
2369 # Now check if this matches where they should be
2370 if mac and device and port:
2371 if str( port ) != "1":
2372 main.log.error( "The attachment port is incorrect for " +
2373 "host " + str( mac ) +
2374 ". Expected: 1 Actual: " + str( port) )
2375 hostAttachment = False
2376 if device != mappings[ str( mac ) ]:
2377 main.log.error( "The attachment device is incorrect for " +
2378 "host " + str( mac ) +
2379 ". Expected: " + mappings[ str( mac ) ] +
2380 " Actual: " + device )
2381 hostAttachment = False
2382 else:
2383 hostAttachment = False
2384 except AssertionError:
2385 main.log.exception( "Json object not as expected" )
2386 main.log.error( repr( host ) )
2387 hostAttachment = False
2388 else:
2389 main.log.error( "No hosts json output or \"Error\"" +
2390 " in output. hosts = " +
2391 repr( hosts[ controller ] ) )
2392 if zeroHosts is False:
2393 hostAttachment = True
2394
2395 # END CHECKING HOST ATTACHMENT POINTS
2396 devicesResults = devicesResults and currentDevicesResult
2397 linksResults = linksResults and currentLinksResult
2398 hostsResults = hostsResults and currentHostsResult
2399 hostAttachmentResults = hostAttachmentResults and\
2400 hostAttachment
2401 topoResult = ( devicesResults and linksResults
2402 and hostsResults and ipResult and
2403 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002404 utilities.assert_equals( expect=True,
2405 actual=topoResult,
2406 onpass="ONOS topology matches Mininet",
Jon Halla440e872016-03-31 15:15:50 -07002407 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002408 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002409
2410 # Compare json objects for hosts and dataplane clusters
2411
2412 # hosts
2413 main.step( "Hosts view is consistent across all ONOS nodes" )
2414 consistentHostsResult = main.TRUE
2415 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002416 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002417 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002418 if hosts[ controller ] == hosts[ 0 ]:
2419 continue
2420 else: # hosts not consistent
2421 main.log.error( "hosts from ONOS" + controllerStr +
2422 " is inconsistent with ONOS1" )
2423 main.log.warn( repr( hosts[ controller ] ) )
2424 consistentHostsResult = main.FALSE
2425
2426 else:
2427 main.log.error( "Error in getting ONOS hosts from ONOS" +
2428 controllerStr )
2429 consistentHostsResult = main.FALSE
2430 main.log.warn( "ONOS" + controllerStr +
2431 " hosts response: " +
2432 repr( hosts[ controller ] ) )
2433 utilities.assert_equals(
2434 expect=main.TRUE,
2435 actual=consistentHostsResult,
2436 onpass="Hosts view is consistent across all ONOS nodes",
2437 onfail="ONOS nodes have different views of hosts" )
2438
2439 main.step( "Hosts information is correct" )
2440 hostsResults = hostsResults and ipResult
2441 utilities.assert_equals(
2442 expect=main.TRUE,
2443 actual=hostsResults,
2444 onpass="Host information is correct",
2445 onfail="Host information is incorrect" )
2446
2447 main.step( "Host attachment points to the network" )
2448 utilities.assert_equals(
2449 expect=True,
2450 actual=hostAttachmentResults,
2451 onpass="Hosts are correctly attached to the network",
2452 onfail="ONOS did not correctly attach hosts to the network" )
2453
2454 # Strongly connected clusters of devices
2455 main.step( "Clusters view is consistent across all ONOS nodes" )
2456 consistentClustersResult = main.TRUE
2457 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07002458 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002459 if "Error" not in clusters[ controller ]:
2460 if clusters[ controller ] == clusters[ 0 ]:
2461 continue
2462 else: # clusters not consistent
2463 main.log.error( "clusters from ONOS" +
2464 controllerStr +
2465 " is inconsistent with ONOS1" )
2466 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002467 else:
2468 main.log.error( "Error in getting dataplane clusters " +
2469 "from ONOS" + controllerStr )
2470 consistentClustersResult = main.FALSE
2471 main.log.warn( "ONOS" + controllerStr +
2472 " clusters response: " +
2473 repr( clusters[ controller ] ) )
2474 utilities.assert_equals(
2475 expect=main.TRUE,
2476 actual=consistentClustersResult,
2477 onpass="Clusters view is consistent across all ONOS nodes",
2478 onfail="ONOS nodes have different views of clusters" )
2479
2480 main.step( "There is only one SCC" )
2481 # there should always only be one cluster
2482 try:
2483 numClusters = len( json.loads( clusters[ 0 ] ) )
2484 except ( ValueError, TypeError ):
2485 main.log.exception( "Error parsing clusters[0]: " +
2486 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002487 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002488 clusterResults = main.FALSE
2489 if numClusters == 1:
2490 clusterResults = main.TRUE
2491 utilities.assert_equals(
2492 expect=1,
2493 actual=numClusters,
2494 onpass="ONOS shows 1 SCC",
2495 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2496
2497 topoResult = ( devicesResults and linksResults
2498 and hostsResults and consistentHostsResult
2499 and consistentClustersResult and clusterResults
2500 and ipResult and hostAttachmentResults )
2501
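        # Also require that the topology converged within two polling
        # attempts of the while loop above.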
2502 topoResult = topoResult and int( count <= 2 )
2503 note = "note it takes about " + str( int( cliTime ) ) + \
2504 " seconds for the test to make all the cli calls to fetch " +\
2505 "the topology from each ONOS instance"
2506 main.log.info(
2507 "Very crass estimate for topology discovery/convergence( " +
2508 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2509 str( count ) + " tries" )
2510
2511 main.step( "Device information is correct" )
2512 utilities.assert_equals(
2513 expect=main.TRUE,
2514 actual=devicesResults,
2515 onpass="Device information is correct",
2516 onfail="Device information is incorrect" )
2517
2518 main.step( "Links are correct" )
2519 utilities.assert_equals(
2520 expect=main.TRUE,
2521 actual=linksResults,
2522 onpass="Link are correct",
2523 onfail="Links are incorrect" )
2524
2525 main.step( "Hosts are correct" )
2526 utilities.assert_equals(
2527 expect=main.TRUE,
2528 actual=hostsResults,
2529 onpass="Hosts are correct",
2530 onfail="Hosts are incorrect" )
2531
2532 # FIXME: move this to an ONOS state case
2533 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002534 nodeResults = utilities.retry( main.HA.nodesCheck,
2535 False,
2536 args=[main.activeNodes],
2537 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002538
Jon Hall41d39f12016-04-11 22:54:35 -07002539 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002540 onpass="Nodes check successful",
2541 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002542 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002543 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002544 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002545 main.CLIs[i].name,
2546 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002547
2548 def CASE9( self, main ):
2549 """
2550 Link s3-s28 down
2551 """
2552 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002553 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002554 assert main, "main not defined"
2555 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002556 assert main.CLIs, "main.CLIs not defined"
2557 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002558 # NOTE: You should probably run a topology check after this
2559
2560 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2561
2562 description = "Turn off a link to ensure that Link Discovery " +\
2563 "is working properly"
2564 main.case( description )
2565
2566 main.step( "Kill Link between s3 and s28" )
2567 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2568 main.log.info( "Waiting " + str( linkSleep ) +
2569 " seconds for link down to be discovered" )
2570 time.sleep( linkSleep )
2571 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2572 onpass="Link down successful",
2573 onfail="Failed to bring link down" )
2574 # TODO do some sort of check here
2575
2576 def CASE10( self, main ):
2577 """
2578 Link s3-s28 up
2579 """
2580 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002581 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002582 assert main, "main not defined"
2583 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002584 assert main.CLIs, "main.CLIs not defined"
2585 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002586 # NOTE: You should probably run a topology check after this
2587
2588 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2589
2590 description = "Restore a link to ensure that Link Discovery is " + \
2591 "working properly"
2592 main.case( description )
2593
2594 main.step( "Bring link between s3 and s28 back up" )
2595 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2596 main.log.info( "Waiting " + str( linkSleep ) +
2597 " seconds for link up to be discovered" )
2598 time.sleep( linkSleep )
2599 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2600 onpass="Link up successful",
2601 onfail="Failed to bring link up" )
2602 # TODO do some sort of check here
2603
2604 def CASE11( self, main ):
2605 """
2606 Switch Down
2607 """
2608 # NOTE: You should probably run a topology check after this
2609 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002610 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002611 assert main, "main not defined"
2612 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002613 assert main.CLIs, "main.CLIs not defined"
2614 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002615
2616 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2617
2618 description = "Killing a switch to ensure it is discovered correctly"
Jon Halla440e872016-03-31 15:15:50 -07002619 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002620 main.case( description )
2621 switch = main.params[ 'kill' ][ 'switch' ]
2622 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2623
2624 # TODO: Make this switch parameterizable
2625 main.step( "Kill " + switch )
2626 main.log.info( "Deleting " + switch )
2627 main.Mininet1.delSwitch( switch )
2628 main.log.info( "Waiting " + str( switchSleep ) +
2629 " seconds for switch down to be discovered" )
2630 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002631 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002632 # Peek at the deleted switch
2633 main.log.warn( str( device ) )
2634 result = main.FALSE
2635 if device and device[ 'available' ] is False:
2636 result = main.TRUE
2637 utilities.assert_equals( expect=main.TRUE, actual=result,
2638 onpass="Kill switch successful",
2639 onfail="Failed to kill switch?" )
2640
2641 def CASE12( self, main ):
2642 """
2643 Switch Up
2644 """
2645 # NOTE: You should probably run a topology check after this
2646 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002647 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002648 assert main, "main not defined"
2649 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002650 assert main.CLIs, "main.CLIs not defined"
2651 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002652 assert ONOS1Port, "ONOS1Port not defined"
2653 assert ONOS2Port, "ONOS2Port not defined"
2654 assert ONOS3Port, "ONOS3Port not defined"
2655 assert ONOS4Port, "ONOS4Port not defined"
2656 assert ONOS5Port, "ONOS5Port not defined"
2657 assert ONOS6Port, "ONOS6Port not defined"
2658 assert ONOS7Port, "ONOS7Port not defined"
2659
2660 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2661 switch = main.params[ 'kill' ][ 'switch' ]
2662 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2663 links = main.params[ 'kill' ][ 'links' ].split()
Jon Halla440e872016-03-31 15:15:50 -07002664 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002665 description = "Adding a switch to ensure it is discovered correctly"
2666 main.case( description )
2667
2668 main.step( "Add back " + switch )
2669 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2670 for peer in links:
2671 main.Mininet1.addLink( switch, peer )
Jon Halla440e872016-03-31 15:15:50 -07002672 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002673 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2674 main.log.info( "Waiting " + str( switchSleep ) +
2675 " seconds for switch up to be discovered" )
2676 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002677 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002678 # Peek at the deleted switch
2679 main.log.warn( str( device ) )
2680 result = main.FALSE
2681 if device and device[ 'available' ]:
2682 result = main.TRUE
2683 utilities.assert_equals( expect=main.TRUE, actual=result,
2684 onpass="add switch successful",
2685 onfail="Failed to add switch?" )
2686
2687 def CASE13( self, main ):
2688 """
2689 Clean up
2690 """
2691 import os
2692 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002693 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002694 assert main, "main not defined"
2695 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002696 assert main.CLIs, "main.CLIs not defined"
2697 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002698
2699 # printing colors to terminal
2700 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2701 'blue': '\033[94m', 'green': '\033[92m',
2702 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2703 main.case( "Test Cleanup" )
2704 main.step( "Killing tcpdumps" )
2705 main.Mininet2.stopTcpdump()
2706
2707 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002708 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002709 main.step( "Copying MN pcap and ONOS log files to test station" )
2710 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2711 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002712 # NOTE: MN Pcap file is being saved to logdir.
2713 # We scp this file as MN and TestON aren't necessarily the same vm
2714
2715 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002716 # TODO: Load these from params
2717 # NOTE: must end in /
2718 logFolder = "/opt/onos/log/"
2719 logFiles = [ "karaf.log", "karaf.log.1" ]
2720 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002721 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002722 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002723 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002724 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2725 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002726 # std*.log's
2727 # NOTE: must end in /
2728 logFolder = "/opt/onos/var/"
2729 logFiles = [ "stderr.log", "stdout.log" ]
2730 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002731 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002732 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002733 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002734 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2735 logFolder + f, dstName )
2736 else:
2737 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002738
2739 main.step( "Stopping Mininet" )
2740 mnResult = main.Mininet1.stopNet()
2741 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2742 onpass="Mininet stopped",
2743 onfail="MN cleanup NOT successful" )
2744
2745 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002746 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002747 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2748 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002749
2750 try:
2751 timerLog = open( main.logdir + "/Timers.csv", 'w')
2752            # Overwrite the file with the timer labels and values, then close
2753 labels = "Gossip Intents"
2754 data = str( gossipTime )
2755 timerLog.write( labels + "\n" + data )
2756 timerLog.close()
2757 except NameError, e:
2758 main.log.exception(e)
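        # A minimal sketch of the Timers.csv layout assumed by the Jenkins
        # plotting job (values illustrative; gossipTime is only defined if the
        # intents case recorded it, hence the NameError guard above):
        #   Gossip Intents
        #   5.32
        # i.e. one header row of labels and one matching row of values.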
2759
2760 def CASE14( self, main ):
2761 """
2762 start election app on all onos nodes
2763 """
Jon Halle1a3b752015-07-22 13:02:46 -07002764 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002765 assert main, "main not defined"
2766 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002767 assert main.CLIs, "main.CLIs not defined"
2768 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002769
2770        main.case( "Start Leadership Election app" )
2771 main.step( "Install leadership election app" )
Jon Halla440e872016-03-31 15:15:50 -07002772 onosCli = main.CLIs[ main.activeNodes[0] ]
2773 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002774 utilities.assert_equals(
2775 expect=main.TRUE,
2776 actual=appResult,
2777 onpass="Election app installed",
2778 onfail="Something went wrong with installing Leadership election" )
2779
2780 main.step( "Run for election on each node" )
Jon Halla440e872016-03-31 15:15:50 -07002781 for i in main.activeNodes:
2782 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002783 time.sleep(5)
2784 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2785 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002786 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002787 expect=True,
2788 actual=sameResult,
2789 onpass="All nodes see the same leaderboards",
2790 onfail="Inconsistent leaderboards" )
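        # consistentLeaderboards() is assumed to return a ( allMatch, boards )
        # pair, e.g. something like:
        #   ( True, [ [ "10.0.0.1", "10.0.0.2", "10.0.0.3" ], ... ] )
        # where each inner list is one node's view of the election topic,
        # leader first, followed by the remaining candidates in order.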
Jon Hall5cf14d52015-07-16 12:15:19 -07002791
Jon Hall25463a82016-04-13 14:03:52 -07002792 if sameResult:
2793 leader = leaders[ 0 ][ 0 ]
2794 if main.nodes[main.activeNodes[0]].ip_address in leader:
2795 correctLeader = True
2796 else:
2797 correctLeader = False
2798 main.step( "First node was elected leader" )
2799 utilities.assert_equals(
2800 expect=True,
2801 actual=correctLeader,
2802 onpass="Correct leader was elected",
2803 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002804
2805 def CASE15( self, main ):
2806 """
2807 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002808 15.1 Run election on each node
2809 15.2 Check that each node has the same leaders and candidates
2810 15.3 Find current leader and withdraw
2811 15.4 Check that a new node was elected leader
2812        15.5 Check that the new leader was the candidate of the old leader
2813 15.6 Run for election on old leader
2814 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2815 15.8 Make sure that the old leader was added to the candidate list
2816
2817        old and new variable prefixes refer to data from before vs. after
2818        the withdrawal, and later to before vs. after the re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002819 """
2820 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002821 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002822 assert main, "main not defined"
2823 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002824 assert main.CLIs, "main.CLIs not defined"
2825 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002826
Jon Halla440e872016-03-31 15:15:50 -07002827 description = "Check that Leadership Election is still functional"
Jon Hall5cf14d52015-07-16 12:15:19 -07002828 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002829        # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002830
Jon Halla440e872016-03-31 15:15:50 -07002831        oldLeaders = []  # list of lists of each node's candidates before
2832        newLeaders = []  # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002833        oldLeader = ''  # the old leader from oldLeaders, None if the nodes do not agree
2834        newLeader = ''  # the new leader from newLeaders, None if the nodes do not agree
2835 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2836 expectNoLeader = False # True when there is only one leader
2837 if main.numCtrls == 1:
2838 expectNoLeader = True
2839
2840 main.step( "Run for election on each node" )
2841 electionResult = main.TRUE
2842
Jon Halla440e872016-03-31 15:15:50 -07002843 for i in main.activeNodes: # run test election on each node
2844 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002845 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002846 utilities.assert_equals(
2847 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002848 actual=electionResult,
2849 onpass="All nodes successfully ran for leadership",
2850 onfail="At least one node failed to run for leadership" )
2851
acsmars3a72bde2015-09-02 14:16:22 -07002852 if electionResult == main.FALSE:
2853 main.log.error(
Jon Halla440e872016-03-31 15:15:50 -07002854 "Skipping Test Case because Election Test App isn't loaded" )
acsmars3a72bde2015-09-02 14:16:22 -07002855 main.skipCase()
2856
acsmars71adceb2015-08-31 15:09:26 -07002857 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002858 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07002859 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07002860 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07002861 if sameResult:
2862 oldLeader = oldLeaders[ 0 ][ 0 ]
2863 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002864 else:
Jon Halla440e872016-03-31 15:15:50 -07002865 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002866 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002867 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002868 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002869 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002870 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002871
2872 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002873 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002874 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002875 if oldLeader is None:
2876 main.log.error( "Leadership isn't consistent." )
2877 withdrawResult = main.FALSE
2878 # Get the CLI of the oldLeader
Jon Halla440e872016-03-31 15:15:50 -07002879 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002880 if oldLeader == main.nodes[ i ].ip_address:
2881 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002882 break
2883 else: # FOR/ELSE statement
2884 main.log.error( "Leader election, could not find current leader" )
2885 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002886 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002887 utilities.assert_equals(
2888 expect=main.TRUE,
2889 actual=withdrawResult,
2890 onpass="Node was withdrawn from election",
2891 onfail="Node was not withdrawn from election" )
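        # electionTestWithdraw() should remove the current leader from the
        # election topic, promoting the next candidate. A hypothetical
        # leaderboard transition (addresses are examples only):
        #   before withdraw: [ "10.0.0.1", "10.0.0.2", "10.0.0.3" ]
        #   after  withdraw: [ "10.0.0.2", "10.0.0.3" ]
        # The following step verifies that promotion actually happened.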
2892
acsmars71adceb2015-08-31 15:09:26 -07002893 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07002894 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07002895 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07002896 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07002897 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07002898 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07002899 if newLeaders[ 0 ][ 0 ] == 'none':
2900 main.log.error( "No leader was elected on at least 1 node" )
2901 if not expectNoLeader:
2902 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07002903 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07002904
2905 # Check that the new leader is not the older leader, which was withdrawn
2906 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07002907 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08002908 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07002909 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002910 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002911 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002912 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002913 onpass="Leadership election passed",
2914 onfail="Something went wrong with Leadership election" )
2915
Jon Halla440e872016-03-31 15:15:50 -07002916        main.step( "Check that the new leader was the candidate of the old leader" )
2917        # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07002918 correctCandidateResult = main.TRUE
2919 if expectNoLeader:
2920 if newLeader == 'none':
2921 main.log.info( "No leader expected. None found. Pass" )
2922 correctCandidateResult = main.TRUE
2923 else:
2924 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2925 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002926 elif len( oldLeaders[0] ) >= 3:
2927 if newLeader == oldLeaders[ 0 ][ 2 ]:
2928 # correct leader was elected
2929 correctCandidateResult = main.TRUE
2930 else:
2931 correctCandidateResult = main.FALSE
2932 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
2933 newLeader, oldLeaders[ 0 ][ 2 ] ) )
2934 else:
2935 main.log.warn( "Could not determine who should be the correct leader" )
2936 main.log.debug( oldLeaders[ 0 ] )
acsmars71adceb2015-08-31 15:09:26 -07002937 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07002938 utilities.assert_equals(
2939 expect=main.TRUE,
2940 actual=correctCandidateResult,
2941 onpass="Correct Candidate Elected",
2942 onfail="Incorrect Candidate Elected" )
2943
Jon Hall5cf14d52015-07-16 12:15:19 -07002944        main.step( "Run for election on old leader ( just so everyone " +
2945 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002946 if oldLeaderCLI is not None:
2947 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002948 else:
acsmars71adceb2015-08-31 15:09:26 -07002949 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002950 runResult = main.FALSE
2951 utilities.assert_equals(
2952 expect=main.TRUE,
2953 actual=runResult,
2954 onpass="App re-ran for election",
2955 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07002956
acsmars71adceb2015-08-31 15:09:26 -07002957 main.step(
2958 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002959 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07002960 # Get new leaders and candidates
2961 reRunLeaders = []
2962        time.sleep( 5 )  # Parameterize this wait
Jon Hall41d39f12016-04-11 22:54:35 -07002963 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07002964
2965 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07002966 if not reRunLeaders[0]:
2967 positionResult = main.FALSE
2968 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07002969 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
2970 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07002971 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002972 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002973 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002974 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002975 onpass="Old leader successfully re-ran for election",
2976 onfail="Something went wrong with Leadership election after " +
2977 "the old leader re-ran for election" )
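        # The position check above relies on the old leader re-joining as the
        # lowest-priority candidate, i.e. its address should reappear at the
        # tail of every leaderboard, e.g. [ "10.0.0.2", "10.0.0.3", "10.0.0.1" ]
        # continuing the hypothetical addresses used earlier.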
2978
2979 def CASE16( self, main ):
2980 """
2981 Install Distributed Primitives app
2982 """
2983 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002984 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002985 assert main, "main not defined"
2986 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002987 assert main.CLIs, "main.CLIs not defined"
2988 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002989
2990 # Variables for the distributed primitives tests
2991 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07002992 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07002993 global onosSet
2994 global onosSetName
2995 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07002996 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07002997 onosSet = set([])
2998 onosSetName = "TestON-set"
2999
3000 description = "Install Primitives app"
3001 main.case( description )
3002 main.step( "Install Primitives app" )
3003 appName = "org.onosproject.distributedprimitives"
Jon Halla440e872016-03-31 15:15:50 -07003004 node = main.activeNodes[0]
3005 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003006 utilities.assert_equals( expect=main.TRUE,
3007 actual=appResults,
3008 onpass="Primitives app activated",
3009 onfail="Primitives app not activated" )
3010 time.sleep( 5 ) # To allow all nodes to activate
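        # activateApp() is assumed to wrap the ONOS CLI's app activation
        # command ( "app activate org.onosproject.distributedprimitives" );
        # the sleep gives the app time to start on every cluster member
        # before CASE17 exercises the primitives.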
3011
3012 def CASE17( self, main ):
3013 """
3014 Check for basic functionality with distributed primitives
3015 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003016 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003017 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003018 assert main, "main not defined"
3019 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003020 assert main.CLIs, "main.CLIs not defined"
3021 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003022 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003023 assert onosSetName, "onosSetName not defined"
3024 # NOTE: assert fails if value is 0/None/Empty/False
3025 try:
3026 pCounterValue
3027 except NameError:
3028 main.log.error( "pCounterValue not defined, setting to 0" )
3029 pCounterValue = 0
3030 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003031 onosSet
3032 except NameError:
3033 main.log.error( "onosSet not defined, setting to empty Set" )
3034 onosSet = set([])
3035 # Variables for the distributed primitives tests. These are local only
3036 addValue = "a"
3037 addAllValue = "a b c d e f"
3038 retainValue = "c d e f"
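        # Rough roadmap of how these values drive the set steps below
        # (expected contents of the distributed set after each mutation):
        #   add "a"                      -> { a }
        #   addAll "a b c d e f"         -> { a, b, c, d, e, f }
        #   remove "a"                   -> { b, c, d, e, f }
        #   removeAll "a b c d e f"      -> { }
        #   addAll then clear            -> { }
        #   addAll then retain "c d e f" -> { c, d, e, f }
        # onosSet mirrors these mutations locally so each get/size check can
        # compare the cluster's view against the expected contents.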
3039
3040 description = "Check for basic functionality with distributed " +\
3041 "primitives"
3042 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003043 main.caseExplanation = "Test the methods of the distributed " +\
3044                                "primitives (counters and sets) through the CLI"
Jon Hall5cf14d52015-07-16 12:15:19 -07003045 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003046 # Partitioned counters
3047 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003048 pCounters = []
3049 threads = []
3050 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003051 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003052 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3053 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003054 args=[ pCounterName ] )
3055 pCounterValue += 1
3056 addedPValues.append( pCounterValue )
3057 threads.append( t )
3058 t.start()
3059
3060 for t in threads:
3061 t.join()
3062 pCounters.append( t.result )
3063 # Check that counter incremented numController times
3064 pCounterResults = True
3065 for i in addedPValues:
3066 tmpResult = i in pCounters
3067 pCounterResults = pCounterResults and tmpResult
3068 if not tmpResult:
3069 main.log.error( str( i ) + " is not in partitioned "
3070 "counter incremented results" )
3071 utilities.assert_equals( expect=True,
3072 actual=pCounterResults,
3073 onpass="Default counter incremented",
3074 onfail="Error incrementing default" +
3075 " counter" )
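        # counterTestAddAndGet() is expected to behave like an atomic
        # addAndGet: each call returns the value *after* its own increment.
        # For example, with 7 active nodes and the counter starting at 0, the
        # seven concurrent calls should return some permutation of 1..7, which
        # is why every value in addedPValues must show up in pCounters.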
3076
Jon Halle1a3b752015-07-22 13:02:46 -07003077 main.step( "Get then Increment a default counter on each node" )
3078 pCounters = []
3079 threads = []
3080 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003081 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003082 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3083 name="counterGetAndAdd-" + str( i ),
3084 args=[ pCounterName ] )
3085 addedPValues.append( pCounterValue )
3086 pCounterValue += 1
3087 threads.append( t )
3088 t.start()
3089
3090 for t in threads:
3091 t.join()
3092 pCounters.append( t.result )
3093 # Check that counter incremented numController times
3094 pCounterResults = True
3095 for i in addedPValues:
3096 tmpResult = i in pCounters
3097 pCounterResults = pCounterResults and tmpResult
3098 if not tmpResult:
3099 main.log.error( str( i ) + " is not in partitioned "
3100 "counter incremented results" )
3101 utilities.assert_equals( expect=True,
3102 actual=pCounterResults,
3103 onpass="Default counter incremented",
3104 onfail="Error incrementing default" +
3105 " counter" )
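        # counterTestGetAndAdd() is the getAndAdd variant: it returns the
        # value *before* its own increment, which is why addedPValues records
        # pCounterValue before bumping it here. Continuing the example above,
        # with the counter at 7 the seven concurrent calls should return a
        # permutation of 7..13 and leave the counter at 14.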
3106
3107 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003108 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003109 utilities.assert_equals( expect=main.TRUE,
3110 actual=incrementCheck,
3111 onpass="Added counters are correct",
3112 onfail="Added counters are incorrect" )
3113
3114 main.step( "Add -8 to then get a default counter on each node" )
3115 pCounters = []
3116 threads = []
3117 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003118 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003119 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3120 name="counterIncrement-" + str( i ),
3121 args=[ pCounterName ],
3122 kwargs={ "delta": -8 } )
3123 pCounterValue += -8
3124 addedPValues.append( pCounterValue )
3125 threads.append( t )
3126 t.start()
3127
3128 for t in threads:
3129 t.join()
3130 pCounters.append( t.result )
3131 # Check that counter incremented numController times
3132 pCounterResults = True
3133 for i in addedPValues:
3134 tmpResult = i in pCounters
3135 pCounterResults = pCounterResults and tmpResult
3136 if not tmpResult:
3137 main.log.error( str( i ) + " is not in partitioned "
3138 "counter incremented results" )
3139 utilities.assert_equals( expect=True,
3140 actual=pCounterResults,
3141 onpass="Default counter incremented",
3142 onfail="Error incrementing default" +
3143 " counter" )
3144
3145 main.step( "Add 5 to then get a default counter on each node" )
3146 pCounters = []
3147 threads = []
3148 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003149 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003150 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3151 name="counterIncrement-" + str( i ),
3152 args=[ pCounterName ],
3153 kwargs={ "delta": 5 } )
3154 pCounterValue += 5
3155 addedPValues.append( pCounterValue )
3156 threads.append( t )
3157 t.start()
3158
3159 for t in threads:
3160 t.join()
3161 pCounters.append( t.result )
3162 # Check that counter incremented numController times
3163 pCounterResults = True
3164 for i in addedPValues:
3165 tmpResult = i in pCounters
3166 pCounterResults = pCounterResults and tmpResult
3167 if not tmpResult:
3168 main.log.error( str( i ) + " is not in partitioned "
3169 "counter incremented results" )
3170 utilities.assert_equals( expect=True,
3171 actual=pCounterResults,
3172 onpass="Default counter incremented",
3173 onfail="Error incrementing default" +
3174 " counter" )
3175
3176 main.step( "Get then add 5 to a default counter on each node" )
3177 pCounters = []
3178 threads = []
3179 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003180 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003181 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3182 name="counterIncrement-" + str( i ),
3183 args=[ pCounterName ],
3184 kwargs={ "delta": 5 } )
3185 addedPValues.append( pCounterValue )
3186 pCounterValue += 5
3187 threads.append( t )
3188 t.start()
3189
3190 for t in threads:
3191 t.join()
3192 pCounters.append( t.result )
3193 # Check that counter incremented numController times
3194 pCounterResults = True
3195 for i in addedPValues:
3196 tmpResult = i in pCounters
3197 pCounterResults = pCounterResults and tmpResult
3198 if not tmpResult:
3199 main.log.error( str( i ) + " is not in partitioned "
3200 "counter incremented results" )
3201 utilities.assert_equals( expect=True,
3202 actual=pCounterResults,
3203 onpass="Default counter incremented",
3204 onfail="Error incrementing default" +
3205 " counter" )
3206
3207 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003208 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003209 utilities.assert_equals( expect=main.TRUE,
3210 actual=incrementCheck,
3211 onpass="Added counters are correct",
3212 onfail="Added counters are incorrect" )
3213
Jon Hall5cf14d52015-07-16 12:15:19 -07003214 # DISTRIBUTED SETS
3215 main.step( "Distributed Set get" )
3216 size = len( onosSet )
3217 getResponses = []
3218 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003219 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003220 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003221 name="setTestGet-" + str( i ),
3222 args=[ onosSetName ] )
3223 threads.append( t )
3224 t.start()
3225 for t in threads:
3226 t.join()
3227 getResponses.append( t.result )
3228
3229 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003230 for i in range( len( main.activeNodes ) ):
3231 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003232 if isinstance( getResponses[ i ], list):
3233 current = set( getResponses[ i ] )
3234 if len( current ) == len( getResponses[ i ] ):
3235 # no repeats
3236 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003237 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003238 " has incorrect view" +
3239 " of set " + onosSetName + ":\n" +
3240 str( getResponses[ i ] ) )
3241 main.log.debug( "Expected: " + str( onosSet ) )
3242 main.log.debug( "Actual: " + str( current ) )
3243 getResults = main.FALSE
3244 else:
3245 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003246 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003247 " has repeat elements in" +
3248 " set " + onosSetName + ":\n" +
3249 str( getResponses[ i ] ) )
3250 getResults = main.FALSE
3251 elif getResponses[ i ] == main.ERROR:
3252 getResults = main.FALSE
3253 utilities.assert_equals( expect=main.TRUE,
3254 actual=getResults,
3255 onpass="Set elements are correct",
3256 onfail="Set elements are incorrect" )
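        # setTestGet() with no 'values' kwarg is assumed to return the set
        # contents as a list of strings (e.g. [] here, [ "a" ] after the first
        # add), or main.ERROR on failure; the checks above reject duplicate
        # elements and any mismatch against the local onosSet mirror.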
3257
3258 main.step( "Distributed Set size" )
3259 sizeResponses = []
3260 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003261 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003262 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003263 name="setTestSize-" + str( i ),
3264 args=[ onosSetName ] )
3265 threads.append( t )
3266 t.start()
3267 for t in threads:
3268 t.join()
3269 sizeResponses.append( t.result )
3270
3271 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003272 for i in range( len( main.activeNodes ) ):
3273 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003274 if size != sizeResponses[ i ]:
3275 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003276 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003277 " expected a size of " + str( size ) +
3278 " for set " + onosSetName +
3279 " but got " + str( sizeResponses[ i ] ) )
3280 utilities.assert_equals( expect=main.TRUE,
3281 actual=sizeResults,
3282 onpass="Set sizes are correct",
3283 onfail="Set sizes are incorrect" )
3284
3285 main.step( "Distributed Set add()" )
3286 onosSet.add( addValue )
3287 addResponses = []
3288 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003289 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003290 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003291 name="setTestAdd-" + str( i ),
3292 args=[ onosSetName, addValue ] )
3293 threads.append( t )
3294 t.start()
3295 for t in threads:
3296 t.join()
3297 addResponses.append( t.result )
3298
3299 # main.TRUE = successfully changed the set
3300 # main.FALSE = action resulted in no change in set
3301 # main.ERROR - Some error in executing the function
3302 addResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003303 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003304 if addResponses[ i ] == main.TRUE:
3305 # All is well
3306 pass
3307 elif addResponses[ i ] == main.FALSE:
3308 # Already in set, probably fine
3309 pass
3310 elif addResponses[ i ] == main.ERROR:
3311 # Error in execution
3312 addResults = main.FALSE
3313 else:
3314 # unexpected result
3315 addResults = main.FALSE
3316 if addResults != main.TRUE:
3317 main.log.error( "Error executing set add" )
3318
3319 # Check if set is still correct
3320 size = len( onosSet )
3321 getResponses = []
3322 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003323 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003324 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003325 name="setTestGet-" + str( i ),
3326 args=[ onosSetName ] )
3327 threads.append( t )
3328 t.start()
3329 for t in threads:
3330 t.join()
3331 getResponses.append( t.result )
3332 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003333 for i in range( len( main.activeNodes ) ):
3334 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003335 if isinstance( getResponses[ i ], list):
3336 current = set( getResponses[ i ] )
3337 if len( current ) == len( getResponses[ i ] ):
3338 # no repeats
3339 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003340 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003341 " of set " + onosSetName + ":\n" +
3342 str( getResponses[ i ] ) )
3343 main.log.debug( "Expected: " + str( onosSet ) )
3344 main.log.debug( "Actual: " + str( current ) )
3345 getResults = main.FALSE
3346 else:
3347 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003348 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003349 " set " + onosSetName + ":\n" +
3350 str( getResponses[ i ] ) )
3351 getResults = main.FALSE
3352 elif getResponses[ i ] == main.ERROR:
3353 getResults = main.FALSE
3354 sizeResponses = []
3355 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003356 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003357 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003358 name="setTestSize-" + str( i ),
3359 args=[ onosSetName ] )
3360 threads.append( t )
3361 t.start()
3362 for t in threads:
3363 t.join()
3364 sizeResponses.append( t.result )
3365 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003366 for i in range( len( main.activeNodes ) ):
3367 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003368 if size != sizeResponses[ i ]:
3369 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003370 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003371 " expected a size of " + str( size ) +
3372 " for set " + onosSetName +
3373 " but got " + str( sizeResponses[ i ] ) )
3374 addResults = addResults and getResults and sizeResults
3375 utilities.assert_equals( expect=main.TRUE,
3376 actual=addResults,
3377 onpass="Set add correct",
3378 onfail="Set add was incorrect" )
3379
3380 main.step( "Distributed Set addAll()" )
3381 onosSet.update( addAllValue.split() )
3382 addResponses = []
3383 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003384 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003385 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003386 name="setTestAddAll-" + str( i ),
3387 args=[ onosSetName, addAllValue ] )
3388 threads.append( t )
3389 t.start()
3390 for t in threads:
3391 t.join()
3392 addResponses.append( t.result )
3393
3394 # main.TRUE = successfully changed the set
3395 # main.FALSE = action resulted in no change in set
3396 # main.ERROR - Some error in executing the function
3397 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003398 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003399 if addResponses[ i ] == main.TRUE:
3400 # All is well
3401 pass
3402 elif addResponses[ i ] == main.FALSE:
3403 # Already in set, probably fine
3404 pass
3405 elif addResponses[ i ] == main.ERROR:
3406 # Error in execution
3407 addAllResults = main.FALSE
3408 else:
3409 # unexpected result
3410 addAllResults = main.FALSE
3411 if addAllResults != main.TRUE:
3412 main.log.error( "Error executing set addAll" )
3413
3414 # Check if set is still correct
3415 size = len( onosSet )
3416 getResponses = []
3417 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003418 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003419 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003420 name="setTestGet-" + str( i ),
3421 args=[ onosSetName ] )
3422 threads.append( t )
3423 t.start()
3424 for t in threads:
3425 t.join()
3426 getResponses.append( t.result )
3427 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003428 for i in range( len( main.activeNodes ) ):
3429 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003430 if isinstance( getResponses[ i ], list):
3431 current = set( getResponses[ i ] )
3432 if len( current ) == len( getResponses[ i ] ):
3433 # no repeats
3434 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003435 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003436 " has incorrect view" +
3437 " of set " + onosSetName + ":\n" +
3438 str( getResponses[ i ] ) )
3439 main.log.debug( "Expected: " + str( onosSet ) )
3440 main.log.debug( "Actual: " + str( current ) )
3441 getResults = main.FALSE
3442 else:
3443 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003444 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003445 " has repeat elements in" +
3446 " set " + onosSetName + ":\n" +
3447 str( getResponses[ i ] ) )
3448 getResults = main.FALSE
3449 elif getResponses[ i ] == main.ERROR:
3450 getResults = main.FALSE
3451 sizeResponses = []
3452 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003453 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003454 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003455 name="setTestSize-" + str( i ),
3456 args=[ onosSetName ] )
3457 threads.append( t )
3458 t.start()
3459 for t in threads:
3460 t.join()
3461 sizeResponses.append( t.result )
3462 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003463 for i in range( len( main.activeNodes ) ):
3464 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003465 if size != sizeResponses[ i ]:
3466 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003467 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003468 " expected a size of " + str( size ) +
3469 " for set " + onosSetName +
3470 " but got " + str( sizeResponses[ i ] ) )
3471 addAllResults = addAllResults and getResults and sizeResults
3472 utilities.assert_equals( expect=main.TRUE,
3473 actual=addAllResults,
3474 onpass="Set addAll correct",
3475 onfail="Set addAll was incorrect" )
3476
3477 main.step( "Distributed Set contains()" )
3478 containsResponses = []
3479 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003480 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003481 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003482 name="setContains-" + str( i ),
3483 args=[ onosSetName ],
3484 kwargs={ "values": addValue } )
3485 threads.append( t )
3486 t.start()
3487 for t in threads:
3488 t.join()
3489            # NOTE: t.result is a tuple; index 1 holds the boolean contains result
3490 containsResponses.append( t.result )
3491
3492 containsResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003493 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003494 if containsResponses[ i ] == main.ERROR:
3495 containsResults = main.FALSE
3496 else:
3497 containsResults = containsResults and\
3498 containsResponses[ i ][ 1 ]
3499 utilities.assert_equals( expect=main.TRUE,
3500 actual=containsResults,
3501 onpass="Set contains is functional",
3502 onfail="Set contains failed" )
3503
3504 main.step( "Distributed Set containsAll()" )
3505 containsAllResponses = []
3506 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003507 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003508 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003509 name="setContainsAll-" + str( i ),
3510 args=[ onosSetName ],
3511 kwargs={ "values": addAllValue } )
3512 threads.append( t )
3513 t.start()
3514 for t in threads:
3515 t.join()
3516            # NOTE: t.result is a tuple; index 1 holds the boolean containsAll result
3517 containsAllResponses.append( t.result )
3518
3519 containsAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003520 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003521            if containsAllResponses[ i ] == main.ERROR:
3522                containsAllResults = main.FALSE
3523            else:
3524                containsAllResults = containsAllResults and\
3525                                     containsAllResponses[ i ][ 1 ]
3526 utilities.assert_equals( expect=main.TRUE,
3527 actual=containsAllResults,
3528 onpass="Set containsAll is functional",
3529 onfail="Set containsAll failed" )
3530
3531 main.step( "Distributed Set remove()" )
3532 onosSet.remove( addValue )
3533 removeResponses = []
3534 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003535 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003536 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003537 name="setTestRemove-" + str( i ),
3538 args=[ onosSetName, addValue ] )
3539 threads.append( t )
3540 t.start()
3541 for t in threads:
3542 t.join()
3543 removeResponses.append( t.result )
3544
3545 # main.TRUE = successfully changed the set
3546 # main.FALSE = action resulted in no change in set
3547 # main.ERROR - Some error in executing the function
3548 removeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003549 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003550 if removeResponses[ i ] == main.TRUE:
3551 # All is well
3552 pass
3553 elif removeResponses[ i ] == main.FALSE:
3554 # not in set, probably fine
3555 pass
3556 elif removeResponses[ i ] == main.ERROR:
3557 # Error in execution
3558 removeResults = main.FALSE
3559 else:
3560 # unexpected result
3561 removeResults = main.FALSE
3562 if removeResults != main.TRUE:
3563 main.log.error( "Error executing set remove" )
3564
3565 # Check if set is still correct
3566 size = len( onosSet )
3567 getResponses = []
3568 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003569 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003570 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003571 name="setTestGet-" + str( i ),
3572 args=[ onosSetName ] )
3573 threads.append( t )
3574 t.start()
3575 for t in threads:
3576 t.join()
3577 getResponses.append( t.result )
3578 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003579 for i in range( len( main.activeNodes ) ):
3580 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003581 if isinstance( getResponses[ i ], list):
3582 current = set( getResponses[ i ] )
3583 if len( current ) == len( getResponses[ i ] ):
3584 # no repeats
3585 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003586 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003587 " has incorrect view" +
3588 " of set " + onosSetName + ":\n" +
3589 str( getResponses[ i ] ) )
3590 main.log.debug( "Expected: " + str( onosSet ) )
3591 main.log.debug( "Actual: " + str( current ) )
3592 getResults = main.FALSE
3593 else:
3594 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003595 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003596 " has repeat elements in" +
3597 " set " + onosSetName + ":\n" +
3598 str( getResponses[ i ] ) )
3599 getResults = main.FALSE
3600 elif getResponses[ i ] == main.ERROR:
3601 getResults = main.FALSE
3602 sizeResponses = []
3603 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003604 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003605 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003606 name="setTestSize-" + str( i ),
3607 args=[ onosSetName ] )
3608 threads.append( t )
3609 t.start()
3610 for t in threads:
3611 t.join()
3612 sizeResponses.append( t.result )
3613 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003614 for i in range( len( main.activeNodes ) ):
3615 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003616 if size != sizeResponses[ i ]:
3617 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003618 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003619 " expected a size of " + str( size ) +
3620 " for set " + onosSetName +
3621 " but got " + str( sizeResponses[ i ] ) )
3622 removeResults = removeResults and getResults and sizeResults
3623 utilities.assert_equals( expect=main.TRUE,
3624 actual=removeResults,
3625 onpass="Set remove correct",
3626 onfail="Set remove was incorrect" )
3627
3628 main.step( "Distributed Set removeAll()" )
3629 onosSet.difference_update( addAllValue.split() )
3630 removeAllResponses = []
3631 threads = []
3632 try:
Jon Halla440e872016-03-31 15:15:50 -07003633 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003634 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003635 name="setTestRemoveAll-" + str( i ),
3636 args=[ onosSetName, addAllValue ] )
3637 threads.append( t )
3638 t.start()
3639 for t in threads:
3640 t.join()
3641 removeAllResponses.append( t.result )
3642 except Exception, e:
3643 main.log.exception(e)
3644
3645 # main.TRUE = successfully changed the set
3646 # main.FALSE = action resulted in no change in set
3647 # main.ERROR - Some error in executing the function
3648 removeAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003649 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003650 if removeAllResponses[ i ] == main.TRUE:
3651 # All is well
3652 pass
3653 elif removeAllResponses[ i ] == main.FALSE:
3654 # not in set, probably fine
3655 pass
3656 elif removeAllResponses[ i ] == main.ERROR:
3657 # Error in execution
3658 removeAllResults = main.FALSE
3659 else:
3660 # unexpected result
3661 removeAllResults = main.FALSE
3662 if removeAllResults != main.TRUE:
3663 main.log.error( "Error executing set removeAll" )
3664
3665 # Check if set is still correct
3666 size = len( onosSet )
3667 getResponses = []
3668 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003669 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003670 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003671 name="setTestGet-" + str( i ),
3672 args=[ onosSetName ] )
3673 threads.append( t )
3674 t.start()
3675 for t in threads:
3676 t.join()
3677 getResponses.append( t.result )
3678 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003679 for i in range( len( main.activeNodes ) ):
3680 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003681 if isinstance( getResponses[ i ], list):
3682 current = set( getResponses[ i ] )
3683 if len( current ) == len( getResponses[ i ] ):
3684 # no repeats
3685 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003686 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003687 " has incorrect view" +
3688 " of set " + onosSetName + ":\n" +
3689 str( getResponses[ i ] ) )
3690 main.log.debug( "Expected: " + str( onosSet ) )
3691 main.log.debug( "Actual: " + str( current ) )
3692 getResults = main.FALSE
3693 else:
3694 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003695 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003696 " has repeat elements in" +
3697 " set " + onosSetName + ":\n" +
3698 str( getResponses[ i ] ) )
3699 getResults = main.FALSE
3700 elif getResponses[ i ] == main.ERROR:
3701 getResults = main.FALSE
3702 sizeResponses = []
3703 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003704 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003705 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003706 name="setTestSize-" + str( i ),
3707 args=[ onosSetName ] )
3708 threads.append( t )
3709 t.start()
3710 for t in threads:
3711 t.join()
3712 sizeResponses.append( t.result )
3713 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003714 for i in range( len( main.activeNodes ) ):
3715 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003716 if size != sizeResponses[ i ]:
3717 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003718 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003719 " expected a size of " + str( size ) +
3720 " for set " + onosSetName +
3721 " but got " + str( sizeResponses[ i ] ) )
3722 removeAllResults = removeAllResults and getResults and sizeResults
3723 utilities.assert_equals( expect=main.TRUE,
3724 actual=removeAllResults,
3725 onpass="Set removeAll correct",
3726 onfail="Set removeAll was incorrect" )
3727
3728 main.step( "Distributed Set addAll()" )
3729 onosSet.update( addAllValue.split() )
3730 addResponses = []
3731 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003732 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003733 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003734 name="setTestAddAll-" + str( i ),
3735 args=[ onosSetName, addAllValue ] )
3736 threads.append( t )
3737 t.start()
3738 for t in threads:
3739 t.join()
3740 addResponses.append( t.result )
3741
3742 # main.TRUE = successfully changed the set
3743 # main.FALSE = action resulted in no change in set
3744 # main.ERROR - Some error in executing the function
3745 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003746 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003747 if addResponses[ i ] == main.TRUE:
3748 # All is well
3749 pass
3750 elif addResponses[ i ] == main.FALSE:
3751 # Already in set, probably fine
3752 pass
3753 elif addResponses[ i ] == main.ERROR:
3754 # Error in execution
3755 addAllResults = main.FALSE
3756 else:
3757 # unexpected result
3758 addAllResults = main.FALSE
3759 if addAllResults != main.TRUE:
3760 main.log.error( "Error executing set addAll" )
3761
3762 # Check if set is still correct
3763 size = len( onosSet )
3764 getResponses = []
3765 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003766 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003767 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003768 name="setTestGet-" + str( i ),
3769 args=[ onosSetName ] )
3770 threads.append( t )
3771 t.start()
3772 for t in threads:
3773 t.join()
3774 getResponses.append( t.result )
3775 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003776 for i in range( len( main.activeNodes ) ):
3777 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003778 if isinstance( getResponses[ i ], list):
3779 current = set( getResponses[ i ] )
3780 if len( current ) == len( getResponses[ i ] ):
3781 # no repeats
3782 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003783 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003784 " has incorrect view" +
3785 " of set " + onosSetName + ":\n" +
3786 str( getResponses[ i ] ) )
3787 main.log.debug( "Expected: " + str( onosSet ) )
3788 main.log.debug( "Actual: " + str( current ) )
3789 getResults = main.FALSE
3790 else:
3791 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003792 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003793 " has repeat elements in" +
3794 " set " + onosSetName + ":\n" +
3795 str( getResponses[ i ] ) )
3796 getResults = main.FALSE
3797 elif getResponses[ i ] == main.ERROR:
3798 getResults = main.FALSE
3799 sizeResponses = []
3800 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003801 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003802 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003803 name="setTestSize-" + str( i ),
3804 args=[ onosSetName ] )
3805 threads.append( t )
3806 t.start()
3807 for t in threads:
3808 t.join()
3809 sizeResponses.append( t.result )
3810 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003811 for i in range( len( main.activeNodes ) ):
3812 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003813 if size != sizeResponses[ i ]:
3814 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003815 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003816 " expected a size of " + str( size ) +
3817 " for set " + onosSetName +
3818 " but got " + str( sizeResponses[ i ] ) )
3819 addAllResults = addAllResults and getResults and sizeResults
3820 utilities.assert_equals( expect=main.TRUE,
3821 actual=addAllResults,
3822 onpass="Set addAll correct",
3823 onfail="Set addAll was incorrect" )
3824
3825 main.step( "Distributed Set clear()" )
3826 onosSet.clear()
3827 clearResponses = []
3828 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003829 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003830 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003831 name="setTestClear-" + str( i ),
3832                             args=[ onosSetName, " " ],  # value doesn't matter when clearing
3833 kwargs={ "clear": True } )
3834 threads.append( t )
3835 t.start()
3836 for t in threads:
3837 t.join()
3838 clearResponses.append( t.result )
3839
3840 # main.TRUE = successfully changed the set
3841 # main.FALSE = action resulted in no change in set
3842 # main.ERROR - Some error in executing the function
3843 clearResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003844 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003845 if clearResponses[ i ] == main.TRUE:
3846 # All is well
3847 pass
3848 elif clearResponses[ i ] == main.FALSE:
3849 # Nothing set, probably fine
3850 pass
3851 elif clearResponses[ i ] == main.ERROR:
3852 # Error in execution
3853 clearResults = main.FALSE
3854 else:
3855 # unexpected result
3856 clearResults = main.FALSE
3857 if clearResults != main.TRUE:
3858 main.log.error( "Error executing set clear" )
3859
3860 # Check if set is still correct
3861 size = len( onosSet )
3862 getResponses = []
3863 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003864 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003865 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003866 name="setTestGet-" + str( i ),
3867 args=[ onosSetName ] )
3868 threads.append( t )
3869 t.start()
3870 for t in threads:
3871 t.join()
3872 getResponses.append( t.result )
3873 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003874 for i in range( len( main.activeNodes ) ):
3875 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003876 if isinstance( getResponses[ i ], list):
3877 current = set( getResponses[ i ] )
3878 if len( current ) == len( getResponses[ i ] ):
3879 # no repeats
3880 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003881 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003882 " has incorrect view" +
3883 " of set " + onosSetName + ":\n" +
3884 str( getResponses[ i ] ) )
3885 main.log.debug( "Expected: " + str( onosSet ) )
3886 main.log.debug( "Actual: " + str( current ) )
3887 getResults = main.FALSE
3888 else:
3889 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003890 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003891 " has repeat elements in" +
3892 " set " + onosSetName + ":\n" +
3893 str( getResponses[ i ] ) )
3894 getResults = main.FALSE
3895 elif getResponses[ i ] == main.ERROR:
3896 getResults = main.FALSE
3897 sizeResponses = []
3898 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003899 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003900 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003901 name="setTestSize-" + str( i ),
3902 args=[ onosSetName ] )
3903 threads.append( t )
3904 t.start()
3905 for t in threads:
3906 t.join()
3907 sizeResponses.append( t.result )
3908 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003909 for i in range( len( main.activeNodes ) ):
3910 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003911 if size != sizeResponses[ i ]:
3912 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003913 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003914 " expected a size of " + str( size ) +
3915 " for set " + onosSetName +
3916 " but got " + str( sizeResponses[ i ] ) )
3917 clearResults = clearResults and getResults and sizeResults
3918 utilities.assert_equals( expect=main.TRUE,
3919 actual=clearResults,
3920 onpass="Set clear correct",
3921 onfail="Set clear was incorrect" )
3922
3923 main.step( "Distributed Set addAll()" )
3924 onosSet.update( addAllValue.split() )
3925 addResponses = []
3926 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003927 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003928 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003929 name="setTestAddAll-" + str( i ),
3930 args=[ onosSetName, addAllValue ] )
3931 threads.append( t )
3932 t.start()
3933 for t in threads:
3934 t.join()
3935 addResponses.append( t.result )
3936
3937 # main.TRUE = successfully changed the set
3938 # main.FALSE = action resulted in no change in set
3939 # main.ERROR - Some error in executing the function
3940 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003941 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003942 if addResponses[ i ] == main.TRUE:
3943 # All is well
3944 pass
3945 elif addResponses[ i ] == main.FALSE:
3946 # Already in set, probably fine
3947 pass
3948 elif addResponses[ i ] == main.ERROR:
3949 # Error in execution
3950 addAllResults = main.FALSE
3951 else:
3952 # unexpected result
3953 addAllResults = main.FALSE
3954 if addAllResults != main.TRUE:
3955 main.log.error( "Error executing set addAll" )
3956
3957 # Check if set is still correct
3958 size = len( onosSet )
3959 getResponses = []
3960 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003961 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003962 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003963 name="setTestGet-" + str( i ),
3964 args=[ onosSetName ] )
3965 threads.append( t )
3966 t.start()
3967 for t in threads:
3968 t.join()
3969 getResponses.append( t.result )
3970 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003971 for i in range( len( main.activeNodes ) ):
3972 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003973 if isinstance( getResponses[ i ], list):
3974 current = set( getResponses[ i ] )
3975 if len( current ) == len( getResponses[ i ] ):
3976 # no repeats
3977 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003978 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003979 " has incorrect view" +
3980 " of set " + onosSetName + ":\n" +
3981 str( getResponses[ i ] ) )
3982 main.log.debug( "Expected: " + str( onosSet ) )
3983 main.log.debug( "Actual: " + str( current ) )
3984 getResults = main.FALSE
3985 else:
3986 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003987 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003988 " has repeat elements in" +
3989 " set " + onosSetName + ":\n" +
3990 str( getResponses[ i ] ) )
3991 getResults = main.FALSE
3992 elif getResponses[ i ] == main.ERROR:
3993 getResults = main.FALSE
3994 sizeResponses = []
3995 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003996 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003997 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003998 name="setTestSize-" + str( i ),
3999 args=[ onosSetName ] )
4000 threads.append( t )
4001 t.start()
4002 for t in threads:
4003 t.join()
4004 sizeResponses.append( t.result )
4005 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004006 for i in range( len( main.activeNodes ) ):
4007 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004008 if size != sizeResponses[ i ]:
4009 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07004010 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004011 " expected a size of " + str( size ) +
4012 " for set " + onosSetName +
4013 " but got " + str( sizeResponses[ i ] ) )
4014 addAllResults = addAllResults and getResults and sizeResults
4015 utilities.assert_equals( expect=main.TRUE,
4016 actual=addAllResults,
4017 onpass="Set addAll correct",
4018 onfail="Set addAll was incorrect" )
4019
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Retain resulted in no change; set already matched, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

        # Transactional maps
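        # A single active node writes numKeys entries ( Key1 .. Key<numKeys> ) with
        # the same value, then every active node reads each key back and the
        # responses are compared against the value that was written.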
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

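        # Read each key back from every active node in parallel and verify that all
        # nodes return the value written in the put step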
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for response in getResponses:
                if response != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map get values were incorrect" )