Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if the HA test setup is
3 working correctly. No failures are induced, so this test should
4 have a 100% pass rate.
5
6List of test cases:
7CASE1: Compile ONOS and push it to the test machines
8CASE2: Assign devices to controllers
9CASE21: Assign mastership to controllers
10CASE3: Assign intents
11CASE4: Ping across added host intents
12CASE5: Reading state of ONOS
13CASE6: The Failure case. Since this is the Sanity test, we do nothing.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: Start election app on all ONOS nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAsanity:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hallf3d16e72015-12-16 17:45:08 -080052 import time
Jon Halla440e872016-03-31 15:15:50 -070053 import json
Jon Hall5cf14d52015-07-16 12:15:19 -070054 main.log.info( "ONOS HA Sanity test - initialization" )
55 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070056 main.caseExplanation = "Set up the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070057 "installing ONOS, starting Mininet and ONOS " +\
58 "cli sessions."
Jon Hall5cf14d52015-07-16 12:15:19 -070059
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
Jon Halle1a3b752015-07-22 13:02:46 -070067 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070068 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070069 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -070071 # TODO: refactor how to get onos port, maybe put into component tag?
Jon Halle1a3b752015-07-22 13:02:46 -070072 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070080 # These are for csv plotting in jenkins
81 global labels
82 global data
83 labels = []
84 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -070085
86 # FIXME: just get controller port from params?
87 # TODO: do we really need all these?
88 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
89 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
90 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
91 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
92 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
93 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
94 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
95
Jon Halle1a3b752015-07-22 13:02:46 -070096 try:
Jon Halla440e872016-03-31 15:15:50 -070097 from tests.HAsanity.dependencies.Counters import Counters
98 main.Counters = Counters()
Jon Halle1a3b752015-07-22 13:02:46 -070099 except Exception as e:
100 main.log.exception( e )
101 main.cleanup()
102 main.exit()
103
104 main.CLIs = []
105 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700106 ipList = []
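        # Collect the per-node CLI and ONOS component handles defined in the
        # .topo file; stop at the first index that is not defined.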
Jon Halle1a3b752015-07-22 13:02:46 -0700107 for i in range( 1, main.numCtrls + 1 ):
108 try:
109 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
110 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
111 ipList.append( main.nodes[ -1 ].ip_address )
112 except AttributeError:
113 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700114
115 main.step( "Create cell file" )
116 cellAppString = main.params[ 'ENV' ][ 'appString' ]
117 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
118 main.Mininet1.ip_address,
119 cellAppString, ipList )
120 main.step( "Applying cell variable to environment" )
121 cellResult = main.ONOSbench.setCell( cellName )
122 verifyResult = main.ONOSbench.verifyCell()
123
124 # FIXME:this is short term fix
125 main.log.info( "Removing raft logs" )
126 main.ONOSbench.onosRemoveRaftLogs()
127
128 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700129 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700130 main.ONOSbench.onosUninstall( node.ip_address )
131
132 # Make sure ONOS is DEAD
133 main.log.info( "Killing any ONOS processes" )
134 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700135 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700136 killed = main.ONOSbench.onosKill( node.ip_address )
137 killResults = killResults and killed
138
139 cleanInstallResult = main.TRUE
140 gitPullResult = main.TRUE
141
142 main.step( "Starting Mininet" )
143 # scp topo file to mininet
144 # TODO: move to params?
145 topoName = "obelisk.py"
146 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700147 main.ONOSbench.scp( main.Mininet1,
148 filePath + topoName,
149 main.Mininet1.home,
150 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700151 mnResult = main.Mininet1.startNet( )
152 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
153 onpass="Mininet Started",
154 onfail="Error starting Mininet" )
155
156 main.step( "Git checkout and pull " + gitBranch )
157 if PULLCODE:
158 main.ONOSbench.gitCheckout( gitBranch )
159 gitPullResult = main.ONOSbench.gitPull()
160 # values of 1 or 3 are good
161 utilities.assert_lesser( expect=0, actual=gitPullResult,
162 onpass="Git pull successful",
163 onfail="Git pull failed" )
164 main.ONOSbench.getVersion( report=True )
165
166 main.step( "Using mvn clean install" )
167 cleanInstallResult = main.TRUE
168 if PULLCODE and gitPullResult == main.TRUE:
169 cleanInstallResult = main.ONOSbench.cleanInstall()
170 else:
171 main.log.warn( "Did not pull new code so skipping mvn " +
172 "clean install" )
173 utilities.assert_equals( expect=main.TRUE,
174 actual=cleanInstallResult,
175 onpass="MCI successful",
176 onfail="MCI failed" )
177 # GRAPHS
178 # NOTE: important params here:
179 # job = name of Jenkins job
180 # Plot Name = Plot-HA, only can be used if multiple plots
181 # index = The number of the graph under plot name
182 job = "HAsanity"
183 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700184 index = "2"
Jon Hall5cf14d52015-07-16 12:15:19 -0700185 graphs = '<ac:structured-macro ac:name="html">\n'
186 graphs += '<ac:plain-text-body><![CDATA[\n'
187 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800188 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700189 '&width=500&height=300"' +\
190 ' noborder="0" width="500" height="300" scrolling="yes" ' +\
191 'seamless="seamless"></iframe>\n'
192 graphs += ']]></ac:plain-text-body>\n'
193 graphs += '</ac:structured-macro>\n'
194 main.log.wiki(graphs)
195
196 main.step( "Creating ONOS package" )
197 packageResult = main.ONOSbench.onosPackage()
198 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
199 onpass="ONOS package successful",
200 onfail="ONOS package failed" )
201
202 main.step( "Installing ONOS package" )
203 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700204 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700205 tmpResult = main.ONOSbench.onosInstall( options="-f",
206 node=node.ip_address )
207 onosInstallResult = onosInstallResult and tmpResult
208 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
209 onpass="ONOS install successful",
210 onfail="ONOS install failed" )
211
212 main.step( "Checking if ONOS is up yet" )
213 for i in range( 2 ):
214 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700215 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700216 started = main.ONOSbench.isup( node.ip_address )
217 if not started:
Jon Hallc6793552016-01-19 14:18:37 -0800218 main.log.error( node.name + " hasn't started" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700219 onosIsupResult = onosIsupResult and started
220 if onosIsupResult == main.TRUE:
221 break
222 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
223 onpass="ONOS startup successful",
224 onfail="ONOS startup failed" )
225
226 main.log.step( "Starting ONOS CLI sessions" )
227 cliResults = main.TRUE
228 threads = []
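        # Start a CLI session to each node in parallel using TestON Threads,
        # then AND the per-thread results together.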
Jon Halle1a3b752015-07-22 13:02:46 -0700229 for i in range( main.numCtrls ):
230 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700231 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700232 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700233 threads.append( t )
234 t.start()
235
236 for t in threads:
237 t.join()
238 cliResults = cliResults and t.result
239 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
240 onpass="ONOS cli startup successful",
241 onfail="ONOS cli startup failed" )
242
Jon Halla440e872016-03-31 15:15:50 -0700243 # Create a list of active nodes for use when some nodes are stopped
244 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
245
Jon Hall5cf14d52015-07-16 12:15:19 -0700246 if main.params[ 'tcpdump' ].lower() == "true":
247 main.step( "Start Packet Capture MN" )
248 main.Mininet2.startTcpdump(
249 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
250 + "-MN.pcap",
251 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
252 port=main.params[ 'MNtcpdump' ][ 'port' ] )
253
Jon Halla440e872016-03-31 15:15:50 -0700254 main.step( "Checking ONOS nodes" )
255 nodesOutput = []
256 nodeResults = main.TRUE
257 threads = []
258 for i in main.activeNodes:
259 t = main.Thread( target=main.CLIs[i].nodes,
260 name="nodes-" + str( i ),
261 args=[ ] )
262 threads.append( t )
263 t.start()
264
265 for t in threads:
266 t.join()
267 nodesOutput.append( t.result )
268 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
269 ips.sort()
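        # Parse each node's `nodes` output and verify that the set of READY
        # member IPs matches the expected set of active node IPs.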
270 for i in nodesOutput:
271 try:
272 current = json.loads( i )
273 activeIps = []
274 currentResult = main.FALSE
275 for node in current:
276 if node['state'] == 'READY':
277 activeIps.append( node['ip'] )
278 activeIps.sort()
279 if ips == activeIps:
280 currentResult = main.TRUE
281 except ( ValueError, TypeError ):
282 main.log.error( "Error parsing nodes output" )
283 main.log.warn( repr( i ) )
284 currentResult = main.FALSE
285 nodeResults = nodeResults and currentResult
286 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
287 onpass="Nodes check successful",
288 onfail="Nodes check NOT successful" )
289
290 if not nodeResults:
291 for cli in main.CLIs:
292 main.log.debug( "{} components not ACTIVE: \n{}".format(
293 cli.name,
294 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
295
Jon Hall5cf14d52015-07-16 12:15:19 -0700296 if cliResults == main.FALSE:
297 main.log.error( "Failed to start ONOS, stopping test" )
298 main.cleanup()
299 main.exit()
300
Jon Hall172b7ba2016-04-07 18:12:20 -0700301 main.step( "Activate apps defined in the params file" )
302 # get data from the params
303 apps = main.params.get( 'apps' )
304 if apps:
305 apps = apps.split(',')
306 main.log.warn( apps )
307 activateResult = True
308 for app in apps:
309 main.CLIs[ 0 ].app( app, "Activate" )
310 # TODO: check this worked
311 time.sleep( 10 ) # wait for apps to activate
312 for app in apps:
313 state = main.CLIs[ 0 ].appStatus( app )
314 if state == "ACTIVE":
315 activateResult = activateResult and True
316 else:
317 main.log.error( "{} is in {} state".format( app, state ) )
318 activateResult = False
319 utilities.assert_equals( expect=True,
320 actual=activateResult,
321 onpass="Successfully activated apps",
322 onfail="Failed to activate apps" )
323 else:
324 main.log.warn( "No apps were specified to be loaded after startup" )
325
326 main.step( "Set ONOS configurations" )
327 config = main.params.get( 'ONOS_Configuration' )
328 if config:
329 main.log.debug( config )
330 checkResult = main.TRUE
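        # Walk the nested ONOS_Configuration params and push each
        # component/setting value to ONOS via the CLI's setCfg call.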
331 for component in config:
332 for setting in config[component]:
333 value = config[component][setting]
334 check = main.CLIs[ 0 ].setCfg( component, setting, value )
335 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
336 checkResult = check and checkResult
337 utilities.assert_equals( expect=main.TRUE,
338 actual=checkResult,
339 onpass="Successfully set config",
340 onfail="Failed to set config" )
341 else:
342 main.log.warn( "No configurations were specified to be changed after startup" )
343
Jon Hall9d2dcad2016-04-08 10:15:20 -0700344 main.step( "App Ids check" )
345 appCheck = main.TRUE
346 threads = []
347 for i in main.activeNodes:
348 t = main.Thread( target=main.CLIs[i].appToIDCheck,
349 name="appToIDCheck-" + str( i ),
350 args=[] )
351 threads.append( t )
352 t.start()
353
354 for t in threads:
355 t.join()
356 appCheck = appCheck and t.result
357 if appCheck != main.TRUE:
358 node = main.activeNodes[0]
359 main.log.warn( main.CLIs[node].apps() )
360 main.log.warn( main.CLIs[node].appIDs() )
361 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
362 onpass="App Ids seem to be correct",
363 onfail="Something is wrong with app Ids" )
364
Jon Hall5cf14d52015-07-16 12:15:19 -0700365 def CASE2( self, main ):
366 """
367 Assign devices to controllers
368 """
369 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700370 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700371 assert main, "main not defined"
372 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700373 assert main.CLIs, "main.CLIs not defined"
374 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700375 assert ONOS1Port, "ONOS1Port not defined"
376 assert ONOS2Port, "ONOS2Port not defined"
377 assert ONOS3Port, "ONOS3Port not defined"
378 assert ONOS4Port, "ONOS4Port not defined"
379 assert ONOS5Port, "ONOS5Port not defined"
380 assert ONOS6Port, "ONOS6Port not defined"
381 assert ONOS7Port, "ONOS7Port not defined"
382
383 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700384 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700385 "and check that an ONOS node becomes the " +\
386 "master of the device."
387 main.step( "Assign switches to controllers" )
388
389 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700390 for i in range( main.numCtrls ):
391 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700392 swList = []
393 for i in range( 1, 29 ):
394 swList.append( "s" + str( i ) )
395 main.Mininet1.assignSwController( sw=swList, ip=ipList )
396
397 mastershipCheck = main.TRUE
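        # For each switch, read its controller list from Mininet (ovs-vsctl)
        # and confirm every ONOS node's IP appears as a connected controller.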
398 for i in range( 1, 29 ):
399 response = main.Mininet1.getSwController( "s" + str( i ) )
400 try:
401 main.log.info( str( response ) )
402 except Exception:
403 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700404 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700405 if re.search( "tcp:" + node.ip_address, response ):
406 mastershipCheck = mastershipCheck and main.TRUE
407 else:
408 main.log.error( "Error, node " + node.ip_address + " is " +
409 "not in the list of controllers s" +
410 str( i ) + " is connecting to." )
411 mastershipCheck = main.FALSE
412 utilities.assert_equals(
413 expect=main.TRUE,
414 actual=mastershipCheck,
415 onpass="Switch mastership assigned correctly",
416 onfail="Switches not assigned correctly to controllers" )
417
418 def CASE21( self, main ):
419 """
420 Assign mastership to controllers
421 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700422 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700423 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700424 assert main, "main not defined"
425 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700426 assert main.CLIs, "main.CLIs not defined"
427 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700428 assert ONOS1Port, "ONOS1Port not defined"
429 assert ONOS2Port, "ONOS2Port not defined"
430 assert ONOS3Port, "ONOS3Port not defined"
431 assert ONOS4Port, "ONOS4Port not defined"
432 assert ONOS5Port, "ONOS5Port not defined"
433 assert ONOS6Port, "ONOS6Port not defined"
434 assert ONOS7Port, "ONOS7Port not defined"
435
436 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700437 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700438 "device. Then manually assign" +\
439 " mastership to specific ONOS nodes using" +\
440 " 'device-role'"
441 main.step( "Assign mastership of switches to specific controllers" )
442 # Manually assign mastership to the controller we want
443 roleCall = main.TRUE
444
445 ipList = [ ]
446 deviceList = []
Jon Halla440e872016-03-31 15:15:50 -0700447 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700448 try:
449 # Assign mastership to specific controllers. This assignment was
450 # determined for a 7 node cluster, but will work with any sized
451 # cluster
452 for i in range( 1, 29 ): # switches 1 through 28
453 # set up correct variables:
454 if i == 1:
455 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700456 ip = main.nodes[ c ].ip_address # ONOS1
Jon Halla440e872016-03-31 15:15:50 -0700457 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700458 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700459 c = 1 % main.numCtrls
460 ip = main.nodes[ c ].ip_address # ONOS2
Jon Halla440e872016-03-31 15:15:50 -0700461 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700462 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700463 c = 1 % main.numCtrls
464 ip = main.nodes[ c ].ip_address # ONOS2
Jon Halla440e872016-03-31 15:15:50 -0700465 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700466 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700467 c = 3 % main.numCtrls
468 ip = main.nodes[ c ].ip_address # ONOS4
Jon Halla440e872016-03-31 15:15:50 -0700469 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700470 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700471 c = 2 % main.numCtrls
472 ip = main.nodes[ c ].ip_address # ONOS3
Jon Halla440e872016-03-31 15:15:50 -0700473 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700474 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700475 c = 2 % main.numCtrls
476 ip = main.nodes[ c ].ip_address # ONOS3
Jon Halla440e872016-03-31 15:15:50 -0700477 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700478 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700479 c = 5 % main.numCtrls
480 ip = main.nodes[ c ].ip_address # ONOS6
Jon Halla440e872016-03-31 15:15:50 -0700481 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700482 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700483 c = 4 % main.numCtrls
484 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700485 dpid = '3' + str( i ).zfill( 3 )
Jon Halla440e872016-03-31 15:15:50 -0700486 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700487 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700488 c = 6 % main.numCtrls
489 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 dpid = '6' + str( i ).zfill( 3 )
Jon Halla440e872016-03-31 15:15:50 -0700491 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700492 elif i == 28:
493 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700494 ip = main.nodes[ c ].ip_address # ONOS1
Jon Halla440e872016-03-31 15:15:50 -0700495 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700496 else:
497 main.log.error( "You didn't write an else statement for " +
498 "switch s" + str( i ) )
499 roleCall = main.FALSE
500 # Assign switch
501 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
502 # TODO: make this controller dynamic
Jon Halla440e872016-03-31 15:15:50 -0700503 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700504 ipList.append( ip )
505 deviceList.append( deviceId )
506 except ( AttributeError, AssertionError ):
507 main.log.exception( "Something is wrong with ONOS device view" )
Jon Halla440e872016-03-31 15:15:50 -0700508 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700509 utilities.assert_equals(
510 expect=main.TRUE,
511 actual=roleCall,
512 onpass="Re-assigned switch mastership to designated controller",
513 onfail="Something wrong with deviceRole calls" )
514
515 main.step( "Check mastership was correctly assigned" )
516 roleCheck = main.TRUE
517 # NOTE: This is due to the fact that device mastership change is not
518 # atomic and is actually a multi step process
519 time.sleep( 5 )
520 for i in range( len( ipList ) ):
521 ip = ipList[i]
522 deviceId = deviceList[i]
523 # Check assignment
Jon Halla440e872016-03-31 15:15:50 -0700524 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700525 if ip in master:
526 roleCheck = roleCheck and main.TRUE
527 else:
528 roleCheck = roleCheck and main.FALSE
529 main.log.error( "Error, controller " + ip + " is not" +
530 " master " + "of device " +
531 str( deviceId ) + ". Master is " +
532 repr( master ) + "." )
533 utilities.assert_equals(
534 expect=main.TRUE,
535 actual=roleCheck,
536 onpass="Switches were successfully reassigned to designated " +
537 "controller",
538 onfail="Switches were not successfully reassigned" )
539
540 def CASE3( self, main ):
541 """
542 Assign intents
543 """
544 import time
545 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700546 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700547 assert main, "main not defined"
548 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700549 assert main.CLIs, "main.CLIs not defined"
550 assert main.nodes, "main.nodes not defined"
Jon Halla440e872016-03-31 15:15:50 -0700551 try:
552 labels
553 except NameError:
554 main.log.error( "labels not defined, setting to []" )
555 labels = []
556 try:
557 data
558 except NameError:
559 main.log.error( "data not defined, setting to []" )
560 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700561 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700562 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700563 "assign predetermined host-to-host intents." +\
564 " After installation, check that the intent" +\
565 " is distributed to all nodes and the state" +\
566 " is INSTALLED"
567
568 # install onos-app-fwd
569 main.step( "Install reactive forwarding app" )
Jon Halla440e872016-03-31 15:15:50 -0700570 onosCli = main.CLIs[ main.activeNodes[0] ]
571 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700572 utilities.assert_equals( expect=main.TRUE, actual=installResults,
573 onpass="Install fwd successful",
574 onfail="Install fwd failed" )
575
576 main.step( "Check app ids" )
577 appCheck = main.TRUE
578 threads = []
Jon Halla440e872016-03-31 15:15:50 -0700579 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700580 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700581 name="appToIDCheck-" + str( i ),
582 args=[] )
583 threads.append( t )
584 t.start()
585
586 for t in threads:
587 t.join()
588 appCheck = appCheck and t.result
589 if appCheck != main.TRUE:
Jon Halla440e872016-03-31 15:15:50 -0700590 main.log.warn( onosCli.apps() )
591 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700592 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
593 onpass="App Ids seem to be correct",
594 onfail="Something is wrong with app Ids" )
595
596 main.step( "Discovering Hosts( Via pingall for now )" )
597 # FIXME: Once we have a host discovery mechanism, use that instead
598 # REACTIVE FWD test
599 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700600 passMsg = "Reactive Pingall test passed"
601 time1 = time.time()
602 pingResult = main.Mininet1.pingall()
603 time2 = time.time()
604 if not pingResult:
605 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700606 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700607 passMsg += " on the second try"
608 utilities.assert_equals(
609 expect=main.TRUE,
610 actual=pingResult,
611 onpass= passMsg,
612 onfail="Reactive Pingall failed, " +
613 "one or more ping pairs failed" )
614 main.log.info( "Time for pingall: %.2f seconds" %
615 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700616 # timeout for fwd flows
617 time.sleep( 11 )
618 # uninstall onos-app-fwd
619 main.step( "Uninstall reactive forwarding app" )
Jon Halla440e872016-03-31 15:15:50 -0700620 node = main.activeNodes[0]
621 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700622 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
623 onpass="Uninstall fwd successful",
624 onfail="Uninstall fwd failed" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700625
626 main.step( "Check app ids" )
627 threads = []
628 appCheck2 = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -0700629 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700630 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700631 name="appToIDCheck-" + str( i ),
632 args=[] )
633 threads.append( t )
634 t.start()
635
636 for t in threads:
637 t.join()
638 appCheck2 = appCheck2 and t.result
639 if appCheck2 != main.TRUE:
Jon Halla440e872016-03-31 15:15:50 -0700640 node = main.activeNodes[0]
641 main.log.warn( main.CLIs[node].apps() )
642 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700643 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
644 onpass="App Ids seem to be correct",
645 onfail="Something is wrong with app Ids" )
646
647 main.step( "Add host intents via cli" )
648 intentIds = []
Jon Hall6e709752016-02-01 13:38:46 -0800649 # TODO: move the host numbers to params
650 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700651 intentAddResult = True
652 hostResult = main.TRUE
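        # Add host-to-host intents pairing h<i> with h<i+10>. The test assumes
        # host MACs in the obelisk topology are the host number in hex
        # ( e.g. h8 -> 00:00:00:00:00:08 ); intents are spread round-robin
        # across the active nodes.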
653 for i in range( 8, 18 ):
654 main.log.info( "Adding host intent between h" + str( i ) +
655 " and h" + str( i + 10 ) )
656 host1 = "00:00:00:00:00:" + \
657 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
658 host2 = "00:00:00:00:00:" + \
659 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
660 # NOTE: getHost can return None
Jon Halla440e872016-03-31 15:15:50 -0700661 host1Dict = onosCli.getHost( host1 )
662 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700663 host1Id = None
664 host2Id = None
665 if host1Dict and host2Dict:
666 host1Id = host1Dict.get( 'id', None )
667 host2Id = host2Dict.get( 'id', None )
668 if host1Id and host2Id:
Jon Halla440e872016-03-31 15:15:50 -0700669 nodeNum = ( i % len( main.activeNodes ) )
670 node = main.activeNodes[nodeNum]
671 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700672 if tmpId:
673 main.log.info( "Added intent with id: " + tmpId )
674 intentIds.append( tmpId )
675 else:
676 main.log.error( "addHostIntent returned: " +
677 repr( tmpId ) )
678 else:
679 main.log.error( "Error, getHost() failed for h" + str( i ) +
680 " and/or h" + str( i + 10 ) )
Jon Halla440e872016-03-31 15:15:50 -0700681 node = main.activeNodes[0]
682 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700683 main.log.warn( "Hosts output: " )
684 try:
685 main.log.warn( json.dumps( json.loads( hosts ),
686 sort_keys=True,
687 indent=4,
688 separators=( ',', ': ' ) ) )
689 except ( ValueError, TypeError ):
690 main.log.warn( repr( hosts ) )
691 hostResult = main.FALSE
692 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
693 onpass="Found a host id for each host",
694 onfail="Error looking up host ids" )
695
696 intentStart = time.time()
Jon Halla440e872016-03-31 15:15:50 -0700697 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700698 main.log.info( "Submitted intents: " + str( intentIds ) )
699 main.log.info( "Intents in ONOS: " + str( onosIds ) )
700 for intent in intentIds:
701 if intent in onosIds:
702 pass # intent submitted is in onos
703 else:
704 intentAddResult = False
705 if intentAddResult:
706 intentStop = time.time()
707 else:
708 intentStop = None
709 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700710 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700711 intentStates = []
712 installedCheck = True
713 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
714 count = 0
715 try:
716 for intent in json.loads( intents ):
717 state = intent.get( 'state', None )
718 if "INSTALLED" not in state:
719 installedCheck = False
720 intentId = intent.get( 'id', None )
721 intentStates.append( ( intentId, state ) )
722 except ( ValueError, TypeError ):
723 main.log.exception( "Error parsing intents" )
724 # add submitted intents not in the store
725 tmplist = [ i for i, s in intentStates ]
726 missingIntents = False
727 for i in intentIds:
728 if i not in tmplist:
729 intentStates.append( ( i, " - " ) )
730 missingIntents = True
731 intentStates.sort()
732 for i, s in intentStates:
733 count += 1
734 main.log.info( "%-6s%-15s%-15s" %
735 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700736 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 try:
738 missing = False
739 if leaders:
740 parsedLeaders = json.loads( leaders )
741 main.log.warn( json.dumps( parsedLeaders,
742 sort_keys=True,
743 indent=4,
744 separators=( ',', ': ' ) ) )
745 # check for all intent partitions
746 topics = []
747 for i in range( 14 ):
748 topics.append( "intent-partition-" + str( i ) )
749 main.log.debug( topics )
750 ONOStopics = [ j['topic'] for j in parsedLeaders ]
751 for topic in topics:
752 if topic not in ONOStopics:
753 main.log.error( "Error: " + topic +
754 " not in leaders" )
755 missing = True
756 else:
757 main.log.error( "leaders() returned None" )
758 except ( ValueError, TypeError ):
759 main.log.exception( "Error parsing leaders" )
760 main.log.error( repr( leaders ) )
761 # Check all nodes
762 if missing:
Jon Halla440e872016-03-31 15:15:50 -0700763 for i in main.activeNodes:
764 response = main.CLIs[i].leaders( jsonFormat=False)
765 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 str( response ) )
767
Jon Halla440e872016-03-31 15:15:50 -0700768 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700769 try:
770 if partitions :
771 parsedPartitions = json.loads( partitions )
772 main.log.warn( json.dumps( parsedPartitions,
773 sort_keys=True,
774 indent=4,
775 separators=( ',', ': ' ) ) )
776 # TODO check for a leader in all partitions
777 # TODO check for consistency among nodes
778 else:
779 main.log.error( "partitions() returned None" )
780 except ( ValueError, TypeError ):
781 main.log.exception( "Error parsing partitions" )
782 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -0700783 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700784 try:
785 if pendingMap :
786 parsedPending = json.loads( pendingMap )
787 main.log.warn( json.dumps( parsedPending,
788 sort_keys=True,
789 indent=4,
790 separators=( ',', ': ' ) ) )
791 # TODO check something here?
792 else:
793 main.log.error( "pendingMap() returned None" )
794 except ( ValueError, TypeError ):
795 main.log.exception( "Error parsing pending map" )
796 main.log.error( repr( pendingMap ) )
797
798 intentAddResult = bool( intentAddResult and not missingIntents and
799 installedCheck )
800 if not intentAddResult:
801 main.log.error( "Error in pushing host intents to ONOS" )
802
803 main.step( "Intent Anti-Entropy dispersion" )
Jon Halla440e872016-03-31 15:15:50 -0700804 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700805 correct = True
806 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700807 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700808 onosIds = []
Jon Halla440e872016-03-31 15:15:50 -0700809 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700810 onosIds.append( ids )
Jon Halla440e872016-03-31 15:15:50 -0700811 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700812 str( sorted( onosIds ) ) )
813 if sorted( ids ) != sorted( intentIds ):
814 main.log.warn( "Set of intent IDs doesn't match" )
815 correct = False
816 break
817 else:
Jon Halla440e872016-03-31 15:15:50 -0700818 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700819 for intent in intents:
820 if intent[ 'state' ] != "INSTALLED":
821 main.log.warn( "Intent " + intent[ 'id' ] +
822 " is " + intent[ 'state' ] )
823 correct = False
824 break
825 if correct:
826 break
827 else:
828 time.sleep(1)
829 if not intentStop:
830 intentStop = time.time()
831 global gossipTime
832 gossipTime = intentStop - intentStart
833 main.log.info( "It took about " + str( gossipTime ) +
834 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700835 gossipPeriod = int( main.params['timers']['gossip'] )
Jon Halla440e872016-03-31 15:15:50 -0700836 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700837 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700838 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700839 onpass="ECM anti-entropy for intents worked within " +
840 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700841 onfail="Intent ECM anti-entropy took too long. " +
842 "Expected time:{}, Actual time:{}".format( maxGossipTime,
843 gossipTime ) )
844 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700845 intentAddResult = True
846
847 if not intentAddResult or "key" in pendingMap:
848 import time
849 installedCheck = True
850 main.log.info( "Sleeping 60 seconds to see if intents are found" )
851 time.sleep( 60 )
Jon Halla440e872016-03-31 15:15:50 -0700852 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700853 main.log.info( "Submitted intents: " + str( intentIds ) )
854 main.log.info( "Intents in ONOS: " + str( onosIds ) )
855 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700856 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700857 intentStates = []
858 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
859 count = 0
860 try:
861 for intent in json.loads( intents ):
862 # Iter through intents of a node
863 state = intent.get( 'state', None )
864 if "INSTALLED" not in state:
865 installedCheck = False
866 intentId = intent.get( 'id', None )
867 intentStates.append( ( intentId, state ) )
868 except ( ValueError, TypeError ):
869 main.log.exception( "Error parsing intents" )
870 # add submitted intents not in the store
871 tmplist = [ i for i, s in intentStates ]
872 for i in intentIds:
873 if i not in tmplist:
874 intentStates.append( ( i, " - " ) )
875 intentStates.sort()
876 for i, s in intentStates:
877 count += 1
878 main.log.info( "%-6s%-15s%-15s" %
879 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700880 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700881 try:
882 missing = False
883 if leaders:
884 parsedLeaders = json.loads( leaders )
885 main.log.warn( json.dumps( parsedLeaders,
886 sort_keys=True,
887 indent=4,
888 separators=( ',', ': ' ) ) )
889 # check for all intent partitions
890 # check for election
891 topics = []
892 for i in range( 14 ):
893 topics.append( "intent-partition-" + str( i ) )
894 # FIXME: this should only be after we start the app
895 topics.append( "org.onosproject.election" )
896 main.log.debug( topics )
897 ONOStopics = [ j['topic'] for j in parsedLeaders ]
898 for topic in topics:
899 if topic not in ONOStopics:
900 main.log.error( "Error: " + topic +
901 " not in leaders" )
902 missing = True
903 else:
904 main.log.error( "leaders() returned None" )
905 except ( ValueError, TypeError ):
906 main.log.exception( "Error parsing leaders" )
907 main.log.error( repr( leaders ) )
908 # Check all nodes
909 if missing:
Jon Halla440e872016-03-31 15:15:50 -0700910 for i in main.activeNodes:
911 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700912 response = node.leaders( jsonFormat=False)
913 main.log.warn( str( node.name ) + " leaders output: \n" +
914 str( response ) )
915
Jon Halla440e872016-03-31 15:15:50 -0700916 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700917 try:
918 if partitions :
919 parsedPartitions = json.loads( partitions )
920 main.log.warn( json.dumps( parsedPartitions,
921 sort_keys=True,
922 indent=4,
923 separators=( ',', ': ' ) ) )
924 # TODO check for a leader in all partitions
925 # TODO check for consistency among nodes
926 else:
927 main.log.error( "partitions() returned None" )
928 except ( ValueError, TypeError ):
929 main.log.exception( "Error parsing partitions" )
930 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -0700931 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700932 try:
933 if pendingMap :
934 parsedPending = json.loads( pendingMap )
935 main.log.warn( json.dumps( parsedPending,
936 sort_keys=True,
937 indent=4,
938 separators=( ',', ': ' ) ) )
939 # TODO check something here?
940 else:
941 main.log.error( "pendingMap() returned None" )
942 except ( ValueError, TypeError ):
943 main.log.exception( "Error parsing pending map" )
944 main.log.error( repr( pendingMap ) )
945
946 def CASE4( self, main ):
947 """
948 Ping across added host intents
949 """
950 import json
951 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700952 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700953 assert main, "main not defined"
954 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700955 assert main.CLIs, "main.CLIs not defined"
956 assert main.nodes, "main.nodes not defined"
Jon Halla440e872016-03-31 15:15:50 -0700957 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700958 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700959 "functionality and check the state of " +\
960 "the intent"
Jon Hall5cf14d52015-07-16 12:15:19 -0700961
962 main.step( "Check Intent state" )
963 onosCli = main.CLIs[ main.activeNodes[0] ]  # define CLI handle before its first use below
 installedCheck = False
964 loopCount = 0
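        # Poll intent states up to 40 times, 1 second apart, until every
        # intent reports INSTALLED.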
965 while not installedCheck and loopCount < 40:
966 installedCheck = True
967 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -0700968 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700969 intentStates = []
970 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
971 count = 0
972 # Iter through intents of a node
973 try:
974 for intent in json.loads( intents ):
975 state = intent.get( 'state', None )
976 if "INSTALLED" not in state:
977 installedCheck = False
978 intentId = intent.get( 'id', None )
979 intentStates.append( ( intentId, state ) )
980 except ( ValueError, TypeError ):
981 main.log.exception( "Error parsing intents." )
982 # Print states
983 intentStates.sort()
984 for i, s in intentStates:
985 count += 1
986 main.log.info( "%-6s%-15s%-15s" %
987 ( str( count ), str( i ), str( s ) ) )
988 if not installedCheck:
989 time.sleep( 1 )
990 loopCount += 1
991 utilities.assert_equals( expect=True, actual=installedCheck,
992 onpass="Intents are all INSTALLED",
993 onfail="Intents are not all in " +
994 "INSTALLED state" )
995
Jon Hall9d2dcad2016-04-08 10:15:20 -0700996 main.step( "Ping across added host intents" )
997 onosCli = main.CLIs[ main.activeNodes[0] ]
998 PingResult = main.TRUE
999 for i in range( 8, 18 ):
1000 ping = main.Mininet1.pingHost( src="h" + str( i ),
1001 target="h" + str( i + 10 ) )
1002 PingResult = PingResult and ping
1003 if ping == main.FALSE:
1004 main.log.warn( "Ping failed between h" + str( i ) +
1005 " and h" + str( i + 10 ) )
1006 elif ping == main.TRUE:
1007 main.log.info( "Ping test passed!" )
1008 # Don't set PingResult or you'd override failures
1009 if PingResult == main.FALSE:
1010 main.log.error(
1011 "Intents have not been installed correctly, pings failed." )
1012 # TODO: pretty print
1013 main.log.warn( "ONOS1 intents: " )
1014 try:
1015 tmpIntents = onosCli.intents()
1016 main.log.warn( json.dumps( json.loads( tmpIntents ),
1017 sort_keys=True,
1018 indent=4,
1019 separators=( ',', ': ' ) ) )
1020 except ( ValueError, TypeError ):
1021 main.log.warn( repr( tmpIntents ) )
1022 utilities.assert_equals(
1023 expect=main.TRUE,
1024 actual=PingResult,
1025 onpass="Intents have been installed correctly and pings work",
1026 onfail="Intents have not been installed correctly, pings failed." )
1027
Jon Hall5cf14d52015-07-16 12:15:19 -07001028 main.step( "Check leadership of topics" )
Jon Halla440e872016-03-31 15:15:50 -07001029 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001030 topicCheck = main.TRUE
1031 try:
1032 if leaders:
1033 parsedLeaders = json.loads( leaders )
1034 main.log.warn( json.dumps( parsedLeaders,
1035 sort_keys=True,
1036 indent=4,
1037 separators=( ',', ': ' ) ) )
1038 # check for all intent partitions
1039 # check for election
1040 # TODO: Look at Devices as topics now that it uses this system
1041 topics = []
1042 for i in range( 14 ):
1043 topics.append( "intent-partition-" + str( i ) )
1044 # FIXME: this should only be after we start the app
1045 # FIXME: topics.append( "org.onosproject.election" )
1046 # Print leaders output
1047 main.log.debug( topics )
1048 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1049 for topic in topics:
1050 if topic not in ONOStopics:
1051 main.log.error( "Error: " + topic +
1052 " not in leaders" )
1053 topicCheck = main.FALSE
1054 else:
1055 main.log.error( "leaders() returned None" )
1056 topicCheck = main.FALSE
1057 except ( ValueError, TypeError ):
1058 topicCheck = main.FALSE
1059 main.log.exception( "Error parsing leaders" )
1060 main.log.error( repr( leaders ) )
1061 # TODO: Check for a leader of these topics
1062 # Check all nodes
1063 if topicCheck:
Jon Halla440e872016-03-31 15:15:50 -07001064 for i in main.activeNodes:
1065 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001066 response = node.leaders( jsonFormat=False)
1067 main.log.warn( str( node.name ) + " leaders output: \n" +
1068 str( response ) )
1069
1070 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1071 onpass="intent partitions are in leaders",
 1072 onfail="Some topics were lost" )
1073 # Print partitions
Jon Halla440e872016-03-31 15:15:50 -07001074 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001075 try:
1076 if partitions :
1077 parsedPartitions = json.loads( partitions )
1078 main.log.warn( json.dumps( parsedPartitions,
1079 sort_keys=True,
1080 indent=4,
1081 separators=( ',', ': ' ) ) )
1082 # TODO check for a leader in all partitions
1083 # TODO check for consistency among nodes
1084 else:
1085 main.log.error( "partitions() returned None" )
1086 except ( ValueError, TypeError ):
1087 main.log.exception( "Error parsing partitions" )
1088 main.log.error( repr( partitions ) )
1089 # Print Pending Map
Jon Halla440e872016-03-31 15:15:50 -07001090 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001091 try:
1092 if pendingMap :
1093 parsedPending = json.loads( pendingMap )
1094 main.log.warn( json.dumps( parsedPending,
1095 sort_keys=True,
1096 indent=4,
1097 separators=( ',', ': ' ) ) )
1098 # TODO check something here?
1099 else:
1100 main.log.error( "pendingMap() returned None" )
1101 except ( ValueError, TypeError ):
1102 main.log.exception( "Error parsing pending map" )
1103 main.log.error( repr( pendingMap ) )
1104
1105 if not installedCheck:
1106 main.log.info( "Waiting 60 seconds to see if the state of " +
1107 "intents change" )
1108 time.sleep( 60 )
1109 # Print the intent states
Jon Halla440e872016-03-31 15:15:50 -07001110 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001111 intentStates = []
1112 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1113 count = 0
1114 # Iter through intents of a node
1115 try:
1116 for intent in json.loads( intents ):
1117 state = intent.get( 'state', None )
1118 if "INSTALLED" not in state:
1119 installedCheck = False
1120 intentId = intent.get( 'id', None )
1121 intentStates.append( ( intentId, state ) )
1122 except ( ValueError, TypeError ):
1123 main.log.exception( "Error parsing intents." )
1124 intentStates.sort()
1125 for i, s in intentStates:
1126 count += 1
1127 main.log.info( "%-6s%-15s%-15s" %
1128 ( str( count ), str( i ), str( s ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001129 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001130 try:
1131 missing = False
1132 if leaders:
1133 parsedLeaders = json.loads( leaders )
1134 main.log.warn( json.dumps( parsedLeaders,
1135 sort_keys=True,
1136 indent=4,
1137 separators=( ',', ': ' ) ) )
1138 # check for all intent partitions
1139 # check for election
1140 topics = []
1141 for i in range( 14 ):
1142 topics.append( "intent-partition-" + str( i ) )
1143 # FIXME: this should only be after we start the app
1144 topics.append( "org.onosproject.election" )
1145 main.log.debug( topics )
1146 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1147 for topic in topics:
1148 if topic not in ONOStopics:
1149 main.log.error( "Error: " + topic +
1150 " not in leaders" )
1151 missing = True
1152 else:
1153 main.log.error( "leaders() returned None" )
1154 except ( ValueError, TypeError ):
1155 main.log.exception( "Error parsing leaders" )
1156 main.log.error( repr( leaders ) )
1157 if missing:
Jon Halla440e872016-03-31 15:15:50 -07001158 for i in main.activeNodes:
1159 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001160 response = node.leaders( jsonFormat=False)
1161 main.log.warn( str( node.name ) + " leaders output: \n" +
1162 str( response ) )
1163
Jon Halla440e872016-03-31 15:15:50 -07001164 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001165 try:
1166 if partitions :
1167 parsedPartitions = json.loads( partitions )
1168 main.log.warn( json.dumps( parsedPartitions,
1169 sort_keys=True,
1170 indent=4,
1171 separators=( ',', ': ' ) ) )
1172 # TODO check for a leader in all partitions
1173 # TODO check for consistency among nodes
1174 else:
1175 main.log.error( "partitions() returned None" )
1176 except ( ValueError, TypeError ):
1177 main.log.exception( "Error parsing partitions" )
1178 main.log.error( repr( partitions ) )
Jon Halla440e872016-03-31 15:15:50 -07001179 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001180 try:
1181 if pendingMap :
1182 parsedPending = json.loads( pendingMap )
1183 main.log.warn( json.dumps( parsedPending,
1184 sort_keys=True,
1185 indent=4,
1186 separators=( ',', ': ' ) ) )
1187 # TODO check something here?
1188 else:
1189 main.log.error( "pendingMap() returned None" )
1190 except ( ValueError, TypeError ):
1191 main.log.exception( "Error parsing pending map" )
1192 main.log.error( repr( pendingMap ) )
1193 # Print flowrules
Jon Halla440e872016-03-31 15:15:50 -07001194 node = main.activeNodes[0]
1195 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001196 main.step( "Wait a minute then ping again" )
1197 # the wait is above
1198 PingResult = main.TRUE
1199 for i in range( 8, 18 ):
1200 ping = main.Mininet1.pingHost( src="h" + str( i ),
1201 target="h" + str( i + 10 ) )
1202 PingResult = PingResult and ping
1203 if ping == main.FALSE:
1204 main.log.warn( "Ping failed between h" + str( i ) +
1205 " and h" + str( i + 10 ) )
1206 elif ping == main.TRUE:
1207 main.log.info( "Ping test passed!" )
1208 # Don't set PingResult or you'd override failures
1209 if PingResult == main.FALSE:
1210 main.log.error(
1211 "Intents have not been installed correctly, pings failed." )
1212 # TODO: pretty print
1213 main.log.warn( "ONOS1 intents: " )
1214 try:
Jon Halla440e872016-03-31 15:15:50 -07001215 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001216 main.log.warn( json.dumps( json.loads( tmpIntents ),
1217 sort_keys=True,
1218 indent=4,
1219 separators=( ',', ': ' ) ) )
1220 except ( ValueError, TypeError ):
1221 main.log.warn( repr( tmpIntents ) )
1222 utilities.assert_equals(
1223 expect=main.TRUE,
1224 actual=PingResult,
1225 onpass="Intents have been installed correctly and pings work",
1226 onfail="Intents have not been installed correctly, pings failed." )
1227
1228 def CASE5( self, main ):
1229 """
1230 Reading state of ONOS
1231 """
1232 import json
1233 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001234 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001235 assert main, "main not defined"
1236 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001237 assert main.CLIs, "main.CLIs not defined"
1238 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001239
1240 main.case( "Setting up and gathering data for current state" )
1241 # The general idea for this test case is to pull the state of
1242 # ( intents,flows, topology,... ) from each ONOS node
1243 # We can then compare them with each other and also with past states
1244
1245 main.step( "Check that each switch has a master" )
1246 global mastershipState
1247 mastershipState = '[]'
1248
1249 # Assert that each device has a master
1250 rolesNotNull = main.TRUE
1251 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001252 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001253 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001254 name="rolesNotNull-" + str( i ),
1255 args=[] )
1256 threads.append( t )
1257 t.start()
1258
1259 for t in threads:
1260 t.join()
1261 rolesNotNull = rolesNotNull and t.result
1262 utilities.assert_equals(
1263 expect=main.TRUE,
1264 actual=rolesNotNull,
1265 onpass="Each device has a master",
1266 onfail="Some devices don't have a master assigned" )
1267
1268 main.step( "Get the Mastership of each switch from each controller" )
1269 ONOSMastership = []
1270 mastershipCheck = main.FALSE
1271 consistentMastership = True
1272 rolesResults = True
1273 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001274 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001275 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001276 name="roles-" + str( i ),
1277 args=[] )
1278 threads.append( t )
1279 t.start()
1280
1281 for t in threads:
1282 t.join()
1283 ONOSMastership.append( t.result )
1284
Jon Halla440e872016-03-31 15:15:50 -07001285 for i in range( len( ONOSMastership ) ):
1286 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001287 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001288 main.log.error( "Error in getting ONOS" + node + " roles" )
1289 main.log.warn( "ONOS" + node + " mastership response: " +
1290 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001291 rolesResults = False
1292 utilities.assert_equals(
1293 expect=True,
1294 actual=rolesResults,
1295 onpass="No error in reading roles output",
1296 onfail="Error in reading roles from ONOS" )
1297
1298 main.step( "Check for consistency in roles from each controller" )
1299 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1300 main.log.info(
1301 "Switch roles are consistent across all ONOS nodes" )
1302 else:
1303 consistentMastership = False
1304 utilities.assert_equals(
1305 expect=True,
1306 actual=consistentMastership,
1307 onpass="Switch roles are consistent across all ONOS nodes",
1308 onfail="ONOS nodes have different views of switch roles" )
1309
1310 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001311 for i in range( len( main.activeNodes ) ):
1312 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001313 try:
1314 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001315 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001316 json.dumps(
1317 json.loads( ONOSMastership[ i ] ),
1318 sort_keys=True,
1319 indent=4,
1320 separators=( ',', ': ' ) ) )
1321 except ( ValueError, TypeError ):
1322 main.log.warn( repr( ONOSMastership[ i ] ) )
1323 elif rolesResults and consistentMastership:
1324 mastershipCheck = main.TRUE
1325 mastershipState = ONOSMastership[ 0 ]
1326
1327 main.step( "Get the intents from each controller" )
1328 global intentState
1329 intentState = []
1330 ONOSIntents = []
1331 intentCheck = main.FALSE
1332 consistentIntents = True
1333 intentsResults = True
1334 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001335 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001336 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001337 name="intents-" + str( i ),
1338 args=[],
1339 kwargs={ 'jsonFormat': True } )
1340 threads.append( t )
1341 t.start()
1342
1343 for t in threads:
1344 t.join()
1345 ONOSIntents.append( t.result )
1346
Jon Halla440e872016-03-31 15:15:50 -07001347 for i in range( len( ONOSIntents ) ):
1348 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001349 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001350 main.log.error( "Error in getting ONOS" + node + " intents" )
1351 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001352 repr( ONOSIntents[ i ] ) )
1353 intentsResults = False
1354 utilities.assert_equals(
1355 expect=True,
1356 actual=intentsResults,
1357 onpass="No error in reading intents output",
1358 onfail="Error in reading intents from ONOS" )
1359
1360 main.step( "Check for consistency in Intents from each controller" )
1361 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1362 main.log.info( "Intents are consistent across all ONOS " +
1363 "nodes" )
1364 else:
1365 consistentIntents = False
1366 main.log.error( "Intents not consistent" )
1367 utilities.assert_equals(
1368 expect=True,
1369 actual=consistentIntents,
1370 onpass="Intents are consistent across all ONOS nodes",
1371 onfail="ONOS nodes have different views of intents" )
1372
1373 if intentsResults:
1374 # Try to make it easy to figure out what is happening
1375 #
1376 # Intent ONOS1 ONOS2 ...
1377 # 0x01 INSTALLED INSTALLING
1378 # ... ... ...
1379 # ... ... ...
1380 title = " Id"
Jon Halla440e872016-03-31 15:15:50 -07001381 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001382 title += " " * 10 + "ONOS" + str( n + 1 )
1383 main.log.warn( title )
Jon Halle1a3b752015-07-22 13:02:46 -07001384 # get all intent keys in the cluster
Jon Hall5cf14d52015-07-16 12:15:19 -07001385 keys = []
1386 try:
1387 # Get the set of all intent keys
1388 for nodeStr in ONOSIntents:
1389 node = json.loads( nodeStr )
1390 for intent in node:
1391 keys.append( intent.get( 'id' ) )
1392 keys = set( keys )
1393 # For each intent key, print the state on each node
1394 for key in keys:
1395 row = "%-13s" % key
1396 for nodeStr in ONOSIntents:
1397 node = json.loads( nodeStr )
1398 for intent in node:
1399 if intent.get( 'id', "Error" ) == key:
1400 row += "%-15s" % intent.get( 'state' )
1401 main.log.warn( row )
1402 # End of intent state table
1403 except ValueError as e:
1404 main.log.exception( e )
1405 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1406
1407 if intentsResults and not consistentIntents:
1408 # print the json objects
Jon Halla440e872016-03-31 15:15:50 -07001409 n = str( main.activeNodes[-1] + 1 )
1410 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001411 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1412 sort_keys=True,
1413 indent=4,
1414 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001415 for i in range( len( ONOSIntents ) ):
1416 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001417 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07001418 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001419 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1420 sort_keys=True,
1421 indent=4,
1422 separators=( ',', ': ' ) ) )
1423 else:
Jon Halla440e872016-03-31 15:15:50 -07001424 main.log.debug( "ONOS" + node + " intents match ONOS" +
1425 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001426 elif intentsResults and consistentIntents:
1427 intentCheck = main.TRUE
1428 intentState = ONOSIntents[ 0 ]
1429
1430 main.step( "Get the flows from each controller" )
1431 global flowState
1432 flowState = []
1433 ONOSFlows = []
1434 ONOSFlowsJson = []
1435 flowCheck = main.FALSE
1436 consistentFlows = True
1437 flowsResults = True
1438 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001439 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001440 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001441 name="flows-" + str( i ),
1442 args=[],
1443 kwargs={ 'jsonFormat': True } )
1444 threads.append( t )
1445 t.start()
1446
1447 # NOTE: Flows command can take some time to run
1448         time.sleep( 30 )
1449 for t in threads:
1450 t.join()
1451 result = t.result
1452 ONOSFlows.append( result )
1453
Jon Halla440e872016-03-31 15:15:50 -07001454 for i in range( len( ONOSFlows ) ):
1455 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001456 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1457 main.log.error( "Error in getting ONOS" + num + " flows" )
1458 main.log.warn( "ONOS" + num + " flows response: " +
1459 repr( ONOSFlows[ i ] ) )
1460 flowsResults = False
1461 ONOSFlowsJson.append( None )
1462 else:
1463 try:
1464 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1465 except ( ValueError, TypeError ):
1466 # FIXME: change this to log.error?
1467 main.log.exception( "Error in parsing ONOS" + num +
1468 " response as json." )
1469 main.log.error( repr( ONOSFlows[ i ] ) )
1470 ONOSFlowsJson.append( None )
1471 flowsResults = False
1472 utilities.assert_equals(
1473 expect=True,
1474 actual=flowsResults,
1475 onpass="No error in reading flows output",
1476 onfail="Error in reading flows from ONOS" )
1477
1478 main.step( "Check for consistency in Flows from each controller" )
1479 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1480 if all( tmp ):
1481 main.log.info( "Flow count is consistent across all ONOS nodes" )
1482 else:
1483 consistentFlows = False
1484 utilities.assert_equals(
1485 expect=True,
1486 actual=consistentFlows,
1487 onpass="The flow count is consistent across all ONOS nodes",
1488 onfail="ONOS nodes have different flow counts" )
1489
1490 if flowsResults and not consistentFlows:
Jon Halla440e872016-03-31 15:15:50 -07001491 for i in range( len( ONOSFlows ) ):
1492 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001493 try:
1494 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001495 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001496 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1497 indent=4, separators=( ',', ': ' ) ) )
1498 except ( ValueError, TypeError ):
Jon Halla440e872016-03-31 15:15:50 -07001499 main.log.warn( "ONOS" + node + " flows: " +
1500 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001501 elif flowsResults and consistentFlows:
1502 flowCheck = main.TRUE
1503 flowState = ONOSFlows[ 0 ]
1504
1505 main.step( "Get the OF Table entries" )
1506 global flows
1507 flows = []
1508 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001509 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001510 if flowCheck == main.FALSE:
1511 for table in flows:
1512 main.log.warn( table )
1513 # TODO: Compare switch flow tables with ONOS flow tables
1514
1515 main.step( "Start continuous pings" )
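        # Kick off long-running background pings ( pingTime=500 ); CASE7 stops them
        # after the failure case.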
1516 main.Mininet2.pingLong(
1517 src=main.params[ 'PING' ][ 'source1' ],
1518 target=main.params[ 'PING' ][ 'target1' ],
1519 pingTime=500 )
1520 main.Mininet2.pingLong(
1521 src=main.params[ 'PING' ][ 'source2' ],
1522 target=main.params[ 'PING' ][ 'target2' ],
1523 pingTime=500 )
1524 main.Mininet2.pingLong(
1525 src=main.params[ 'PING' ][ 'source3' ],
1526 target=main.params[ 'PING' ][ 'target3' ],
1527 pingTime=500 )
1528 main.Mininet2.pingLong(
1529 src=main.params[ 'PING' ][ 'source4' ],
1530 target=main.params[ 'PING' ][ 'target4' ],
1531 pingTime=500 )
1532 main.Mininet2.pingLong(
1533 src=main.params[ 'PING' ][ 'source5' ],
1534 target=main.params[ 'PING' ][ 'target5' ],
1535 pingTime=500 )
1536 main.Mininet2.pingLong(
1537 src=main.params[ 'PING' ][ 'source6' ],
1538 target=main.params[ 'PING' ][ 'target6' ],
1539 pingTime=500 )
1540 main.Mininet2.pingLong(
1541 src=main.params[ 'PING' ][ 'source7' ],
1542 target=main.params[ 'PING' ][ 'target7' ],
1543 pingTime=500 )
1544 main.Mininet2.pingLong(
1545 src=main.params[ 'PING' ][ 'source8' ],
1546 target=main.params[ 'PING' ][ 'target8' ],
1547 pingTime=500 )
1548 main.Mininet2.pingLong(
1549 src=main.params[ 'PING' ][ 'source9' ],
1550 target=main.params[ 'PING' ][ 'target9' ],
1551 pingTime=500 )
1552 main.Mininet2.pingLong(
1553 src=main.params[ 'PING' ][ 'source10' ],
1554 target=main.params[ 'PING' ][ 'target10' ],
1555 pingTime=500 )
1556
1557 main.step( "Collecting topology information from ONOS" )
1558 devices = []
1559 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001560 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001561 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001562 name="devices-" + str( i ),
1563 args=[ ] )
1564 threads.append( t )
1565 t.start()
1566
1567 for t in threads:
1568 t.join()
1569 devices.append( t.result )
1570 hosts = []
1571 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001572 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001573 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001574 name="hosts-" + str( i ),
1575 args=[ ] )
1576 threads.append( t )
1577 t.start()
1578
1579 for t in threads:
1580 t.join()
1581 try:
1582 hosts.append( json.loads( t.result ) )
1583 except ( ValueError, TypeError ):
1584 # FIXME: better handling of this, print which node
1585 # Maybe use thread name?
1586 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001587 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001588 hosts.append( None )
1589
1590 ports = []
1591 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001592 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001593 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001594 name="ports-" + str( i ),
1595 args=[ ] )
1596 threads.append( t )
1597 t.start()
1598
1599 for t in threads:
1600 t.join()
1601 ports.append( t.result )
1602 links = []
1603 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001604 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001605 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001606 name="links-" + str( i ),
1607 args=[ ] )
1608 threads.append( t )
1609 t.start()
1610
1611 for t in threads:
1612 t.join()
1613 links.append( t.result )
1614 clusters = []
1615 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001616 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001617 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001618 name="clusters-" + str( i ),
1619 args=[ ] )
1620 threads.append( t )
1621 t.start()
1622
1623 for t in threads:
1624 t.join()
1625 clusters.append( t.result )
1626 # Compare json objects for hosts and dataplane clusters
1627
1628 # hosts
1629 main.step( "Host view is consistent across ONOS nodes" )
1630 consistentHostsResult = main.TRUE
1631 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001632 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001633 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001634 if hosts[ controller ] == hosts[ 0 ]:
1635 continue
1636 else: # hosts not consistent
1637 main.log.error( "hosts from ONOS" +
1638 controllerStr +
1639                                          " are inconsistent with ONOS1" )
1640 main.log.warn( repr( hosts[ controller ] ) )
1641 consistentHostsResult = main.FALSE
1642
1643 else:
1644 main.log.error( "Error in getting ONOS hosts from ONOS" +
1645 controllerStr )
1646 consistentHostsResult = main.FALSE
1647 main.log.warn( "ONOS" + controllerStr +
1648 " hosts response: " +
1649 repr( hosts[ controller ] ) )
1650 utilities.assert_equals(
1651 expect=main.TRUE,
1652 actual=consistentHostsResult,
1653 onpass="Hosts view is consistent across all ONOS nodes",
1654 onfail="ONOS nodes have different views of hosts" )
1655
1656 main.step( "Each host has an IP address" )
1657 ipResult = main.TRUE
1658 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001659 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001660 if hosts[ controller ]:
1661 for host in hosts[ controller ]:
1662 if not host.get( 'ipAddresses', [ ] ):
1663 main.log.error( "Error with host ips on controller" +
1664 controllerStr + ": " + str( host ) )
1665 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001666 utilities.assert_equals(
1667 expect=main.TRUE,
1668 actual=ipResult,
1669             onpass="Every host has at least one IP address",
1670             onfail="At least one host is missing an IP address" )
1671
1672 # Strongly connected clusters of devices
1673 main.step( "Cluster view is consistent across ONOS nodes" )
1674 consistentClustersResult = main.TRUE
1675 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07001676 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001677 if "Error" not in clusters[ controller ]:
1678 if clusters[ controller ] == clusters[ 0 ]:
1679 continue
1680 else: # clusters not consistent
1681 main.log.error( "clusters from ONOS" + controllerStr +
1682                                          " are inconsistent with ONOS1" )
1683 consistentClustersResult = main.FALSE
1684
1685 else:
1686 main.log.error( "Error in getting dataplane clusters " +
1687 "from ONOS" + controllerStr )
1688 consistentClustersResult = main.FALSE
1689 main.log.warn( "ONOS" + controllerStr +
1690 " clusters response: " +
1691 repr( clusters[ controller ] ) )
1692 utilities.assert_equals(
1693 expect=main.TRUE,
1694 actual=consistentClustersResult,
1695 onpass="Clusters view is consistent across all ONOS nodes",
1696 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001697 if consistentClustersResult != main.TRUE:
1698 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07001699 # there should always only be one cluster
1700 main.step( "Cluster view correct across ONOS nodes" )
1701 try:
1702 numClusters = len( json.loads( clusters[ 0 ] ) )
1703 except ( ValueError, TypeError ):
1704 main.log.exception( "Error parsing clusters[0]: " +
1705 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001706 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001707 clusterResults = main.FALSE
1708 if numClusters == 1:
1709 clusterResults = main.TRUE
1710 utilities.assert_equals(
1711 expect=1,
1712 actual=numClusters,
1713 onpass="ONOS shows 1 SCC",
1714 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1715
1716 main.step( "Comparing ONOS topology to MN" )
1717 devicesResults = main.TRUE
1718 linksResults = main.TRUE
1719 hostsResults = main.TRUE
1720 mnSwitches = main.Mininet1.getSwitches()
1721 mnLinks = main.Mininet1.getLinks()
1722 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07001723 for controller in main.activeNodes:
1724 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001725 if devices[ controller ] and ports[ controller ] and\
1726 "Error" not in devices[ controller ] and\
1727 "Error" not in ports[ controller ]:
Jon Halla440e872016-03-31 15:15:50 -07001728 currentDevicesResult = main.Mininet1.compareSwitches(
1729 mnSwitches,
1730 json.loads( devices[ controller ] ),
1731 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001732 else:
1733 currentDevicesResult = main.FALSE
1734 utilities.assert_equals( expect=main.TRUE,
1735 actual=currentDevicesResult,
1736 onpass="ONOS" + controllerStr +
1737 " Switches view is correct",
1738 onfail="ONOS" + controllerStr +
1739 " Switches view is incorrect" )
1740 if links[ controller ] and "Error" not in links[ controller ]:
1741 currentLinksResult = main.Mininet1.compareLinks(
1742 mnSwitches, mnLinks,
1743 json.loads( links[ controller ] ) )
1744 else:
1745 currentLinksResult = main.FALSE
1746 utilities.assert_equals( expect=main.TRUE,
1747 actual=currentLinksResult,
1748 onpass="ONOS" + controllerStr +
1749 " links view is correct",
1750 onfail="ONOS" + controllerStr +
1751 " links view is incorrect" )
1752
Jon Hall657cdf62015-12-17 14:40:51 -08001753 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001754 currentHostsResult = main.Mininet1.compareHosts(
1755 mnHosts,
1756 hosts[ controller ] )
1757 else:
1758 currentHostsResult = main.FALSE
1759 utilities.assert_equals( expect=main.TRUE,
1760 actual=currentHostsResult,
1761 onpass="ONOS" + controllerStr +
1762 " hosts exist in Mininet",
1763 onfail="ONOS" + controllerStr +
1764 " hosts don't match Mininet" )
1765
1766 devicesResults = devicesResults and currentDevicesResult
1767 linksResults = linksResults and currentLinksResult
1768 hostsResults = hostsResults and currentHostsResult
1769
1770 main.step( "Device information is correct" )
1771 utilities.assert_equals(
1772 expect=main.TRUE,
1773 actual=devicesResults,
1774 onpass="Device information is correct",
1775 onfail="Device information is incorrect" )
1776
1777 main.step( "Links are correct" )
1778 utilities.assert_equals(
1779 expect=main.TRUE,
1780 actual=linksResults,
1781             onpass="Links are correct",
1782 onfail="Links are incorrect" )
1783
1784 main.step( "Hosts are correct" )
1785 utilities.assert_equals(
1786 expect=main.TRUE,
1787 actual=hostsResults,
1788 onpass="Hosts are correct",
1789 onfail="Hosts are incorrect" )
1790
1791 def CASE6( self, main ):
1792 """
1793 The Failure case. Since this is the Sanity test, we do nothing.
1794 """
1795 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001796 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001797 assert main, "main not defined"
1798 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001799 assert main.CLIs, "main.CLIs not defined"
1800 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 main.case( "Wait 60 seconds instead of inducing a failure" )
1802 time.sleep( 60 )
1803 utilities.assert_equals(
1804 expect=main.TRUE,
1805 actual=main.TRUE,
1806 onpass="Sleeping 60 seconds",
1807 onfail="Something is terribly wrong with my math" )
1808
1809 def CASE7( self, main ):
1810 """
1811 Check state after ONOS failure
1812 """
1813 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001814 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001815 assert main, "main not defined"
1816 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001817 assert main.CLIs, "main.CLIs not defined"
1818 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001819 main.case( "Running ONOS Constant State Tests" )
1820
1821 main.step( "Check that each switch has a master" )
1822 # Assert that each device has a master
1823 rolesNotNull = main.TRUE
1824 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001825 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001826 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001827 name="rolesNotNull-" + str( i ),
1828 args=[ ] )
1829 threads.append( t )
1830 t.start()
1831
1832 for t in threads:
1833 t.join()
1834 rolesNotNull = rolesNotNull and t.result
1835 utilities.assert_equals(
1836 expect=main.TRUE,
1837 actual=rolesNotNull,
1838 onpass="Each device has a master",
1839 onfail="Some devices don't have a master assigned" )
1840
1841 main.step( "Read device roles from ONOS" )
1842 ONOSMastership = []
1843 mastershipCheck = main.FALSE
1844 consistentMastership = True
1845 rolesResults = True
1846 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001847 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001848 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001849 name="roles-" + str( i ),
1850 args=[] )
1851 threads.append( t )
1852 t.start()
1853
1854 for t in threads:
1855 t.join()
1856 ONOSMastership.append( t.result )
1857
Jon Halla440e872016-03-31 15:15:50 -07001858 for i in range( len( ONOSMastership ) ):
1859 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001860 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001861 main.log.error( "Error in getting ONOS" + node + " roles" )
1862 main.log.warn( "ONOS" + node + " mastership response: " +
1863 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001864 rolesResults = False
1865 utilities.assert_equals(
1866 expect=True,
1867 actual=rolesResults,
1868 onpass="No error in reading roles output",
1869 onfail="Error in reading roles from ONOS" )
1870
1871 main.step( "Check for consistency in roles from each controller" )
1872 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1873 main.log.info(
1874 "Switch roles are consistent across all ONOS nodes" )
1875 else:
1876 consistentMastership = False
1877 utilities.assert_equals(
1878 expect=True,
1879 actual=consistentMastership,
1880 onpass="Switch roles are consistent across all ONOS nodes",
1881 onfail="ONOS nodes have different views of switch roles" )
1882
1883 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001884 for i in range( len( ONOSMastership ) ):
1885 node = str( main.activeNodes[i] + 1 )
1886                 main.log.warn( "ONOS" + node + " roles: " +
1887 json.dumps( json.loads( ONOSMastership[ i ] ),
1888 sort_keys=True,
1889 indent=4,
1890 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001891
1892 description2 = "Compare switch roles from before failure"
1893 main.step( description2 )
1894 try:
1895 currentJson = json.loads( ONOSMastership[0] )
1896 oldJson = json.loads( mastershipState )
1897 except ( ValueError, TypeError ):
1898 main.log.exception( "Something is wrong with parsing " +
1899 "ONOSMastership[0] or mastershipState" )
1900 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1901 main.log.error( "mastershipState" + repr( mastershipState ) )
1902 main.cleanup()
1903 main.exit()
1904 mastershipCheck = main.TRUE
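        # Compare the current master of each of the 28 switches with the master
        # recorded before the failure case; any change fails this step.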
1905 for i in range( 1, 29 ):
1906 switchDPID = str(
1907 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1908 current = [ switch[ 'master' ] for switch in currentJson
1909 if switchDPID in switch[ 'id' ] ]
1910 old = [ switch[ 'master' ] for switch in oldJson
1911 if switchDPID in switch[ 'id' ] ]
1912 if current == old:
1913 mastershipCheck = mastershipCheck and main.TRUE
1914 else:
1915 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1916 mastershipCheck = main.FALSE
1917 utilities.assert_equals(
1918 expect=main.TRUE,
1919 actual=mastershipCheck,
1920 onpass="Mastership of Switches was not changed",
1921 onfail="Mastership of some switches changed" )
1922 mastershipCheck = mastershipCheck and consistentMastership
1923
1924 main.step( "Get the intents and compare across all nodes" )
1925 ONOSIntents = []
1926 intentCheck = main.FALSE
1927 consistentIntents = True
1928 intentsResults = True
1929 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001930 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001931 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001932 name="intents-" + str( i ),
1933 args=[],
1934 kwargs={ 'jsonFormat': True } )
1935 threads.append( t )
1936 t.start()
1937
1938 for t in threads:
1939 t.join()
1940 ONOSIntents.append( t.result )
1941
Jon Halla440e872016-03-31 15:15:50 -07001942         for i in range( len( ONOSIntents ) ):
1943 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001944 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001945 main.log.error( "Error in getting ONOS" + node + " intents" )
1946 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001947 repr( ONOSIntents[ i ] ) )
1948 intentsResults = False
1949 utilities.assert_equals(
1950 expect=True,
1951 actual=intentsResults,
1952 onpass="No error in reading intents output",
1953 onfail="Error in reading intents from ONOS" )
1954
1955 main.step( "Check for consistency in Intents from each controller" )
1956 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1957 main.log.info( "Intents are consistent across all ONOS " +
1958 "nodes" )
1959 else:
1960 consistentIntents = False
1961
1962 # Try to make it easy to figure out what is happening
1963 #
1964 # Intent ONOS1 ONOS2 ...
1965 # 0x01 INSTALLED INSTALLING
1966 # ... ... ...
1967 # ... ... ...
1968 title = " ID"
Jon Halla440e872016-03-31 15:15:50 -07001969 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001970 title += " " * 10 + "ONOS" + str( n + 1 )
1971 main.log.warn( title )
1972 # get all intent keys in the cluster
1973 keys = []
1974 for nodeStr in ONOSIntents:
1975 node = json.loads( nodeStr )
1976 for intent in node:
1977 keys.append( intent.get( 'id' ) )
1978 keys = set( keys )
1979 for key in keys:
1980 row = "%-13s" % key
1981 for nodeStr in ONOSIntents:
1982 node = json.loads( nodeStr )
1983 for intent in node:
1984 if intent.get( 'id' ) == key:
1985 row += "%-15s" % intent.get( 'state' )
1986 main.log.warn( row )
1987 # End table view
1988
1989 utilities.assert_equals(
1990 expect=True,
1991 actual=consistentIntents,
1992 onpass="Intents are consistent across all ONOS nodes",
1993 onfail="ONOS nodes have different views of intents" )
1994 intentStates = []
1995 for node in ONOSIntents: # Iter through ONOS nodes
1996 nodeStates = []
1997 # Iter through intents of a node
1998 try:
1999 for intent in json.loads( node ):
2000 nodeStates.append( intent[ 'state' ] )
2001 except ( ValueError, TypeError ):
2002 main.log.exception( "Error in parsing intents" )
2003 main.log.error( repr( node ) )
2004 intentStates.append( nodeStates )
2005 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2006 main.log.info( dict( out ) )
2007
2008 if intentsResults and not consistentIntents:
Jon Halla440e872016-03-31 15:15:50 -07002009 for i in range( len( main.activeNodes ) ):
2010 node = str( main.activeNodes[i] + 1 )
2011 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07002012 main.log.warn( json.dumps(
2013 json.loads( ONOSIntents[ i ] ),
2014 sort_keys=True,
2015 indent=4,
2016 separators=( ',', ': ' ) ) )
2017 elif intentsResults and consistentIntents:
2018 intentCheck = main.TRUE
2019
2020 # NOTE: Store has no durability, so intents are lost across system
2021 # restarts
2022 main.step( "Compare current intents with intents before the failure" )
2023 # NOTE: this requires case 5 to pass for intentState to be set.
2024 # maybe we should stop the test if that fails?
2025 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002026 try:
2027 intentState
2028 except NameError:
2029 main.log.warn( "No previous intent state was saved" )
2030 else:
2031 if intentState and intentState == ONOSIntents[ 0 ]:
2032 sameIntents = main.TRUE
2033 main.log.info( "Intents are consistent with before failure" )
2034 # TODO: possibly the states have changed? we may need to figure out
2035 # what the acceptable states are
2036 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2037 sameIntents = main.TRUE
2038 try:
2039 before = json.loads( intentState )
2040 after = json.loads( ONOSIntents[ 0 ] )
2041 for intent in before:
2042 if intent not in after:
2043 sameIntents = main.FALSE
2044 main.log.debug( "Intent is not currently in ONOS " +
2045 "(at least in the same form):" )
2046 main.log.debug( json.dumps( intent ) )
2047 except ( ValueError, TypeError ):
2048 main.log.exception( "Exception printing intents" )
2049 main.log.debug( repr( ONOSIntents[0] ) )
2050 main.log.debug( repr( intentState ) )
2051 if sameIntents == main.FALSE:
2052 try:
2053 main.log.debug( "ONOS intents before: " )
2054 main.log.debug( json.dumps( json.loads( intentState ),
2055 sort_keys=True, indent=4,
2056 separators=( ',', ': ' ) ) )
2057 main.log.debug( "Current ONOS intents: " )
2058 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2059 sort_keys=True, indent=4,
2060 separators=( ',', ': ' ) ) )
2061 except ( ValueError, TypeError ):
2062 main.log.exception( "Exception printing intents" )
2063 main.log.debug( repr( ONOSIntents[0] ) )
2064 main.log.debug( repr( intentState ) )
2065 utilities.assert_equals(
2066 expect=main.TRUE,
2067 actual=sameIntents,
2068 onpass="Intents are consistent with before failure",
2069 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002070 intentCheck = intentCheck and sameIntents
2071
2072 main.step( "Get the OF Table entries and compare to before " +
2073 "component failure" )
2074 FlowTables = main.TRUE
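        # Compare each switch's current flow table with the snapshot taken in CASE5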
Jon Hall5cf14d52015-07-16 12:15:19 -07002075 for i in range( 28 ):
2076 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002077 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2078 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
Jon Hall5cf14d52015-07-16 12:15:19 -07002079 if FlowTables == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002080 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002081 utilities.assert_equals(
2082 expect=main.TRUE,
2083 actual=FlowTables,
2084 onpass="No changes were found in the flow tables",
2085 onfail="Changes were found in the flow tables" )
2086
2087 main.Mininet2.pingLongKill()
2088 '''
2089 main.step( "Check the continuous pings to ensure that no packets " +
2090 "were dropped during component failure" )
2091 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2092 main.params[ 'TESTONIP' ] )
2093 LossInPings = main.FALSE
2094 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2095 for i in range( 8, 18 ):
2096 main.log.info(
2097 "Checking for a loss in pings along flow from s" +
2098 str( i ) )
2099 LossInPings = main.Mininet2.checkForLoss(
2100 "/tmp/ping.h" +
2101 str( i ) ) or LossInPings
2102 if LossInPings == main.TRUE:
2103 main.log.info( "Loss in ping detected" )
2104 elif LossInPings == main.ERROR:
2105 main.log.info( "There are multiple mininet process running" )
2106 elif LossInPings == main.FALSE:
2107 main.log.info( "No Loss in the pings" )
2108 main.log.info( "No loss of dataplane connectivity" )
2109 utilities.assert_equals(
2110 expect=main.FALSE,
2111 actual=LossInPings,
2112 onpass="No Loss of connectivity",
2113 onfail="Loss of dataplane connectivity detected" )
2114 '''
2115
2116 main.step( "Leadership Election is still functional" )
2117 # Test of LeadershipElection
Jon Halla440e872016-03-31 15:15:50 -07002118 leaderList = []
2119
Jon Hall5cf14d52015-07-16 12:15:19 -07002120 # NOTE: this only works for the sanity test. In case of failures,
2121 # leader will likely change
Jon Halla440e872016-03-31 15:15:50 -07002122 leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
Jon Hall5cf14d52015-07-16 12:15:19 -07002123 leaderResult = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07002124
2125 for i in main.activeNodes:
2126 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002127 leaderN = cli.electionTestLeader()
Jon Halla440e872016-03-31 15:15:50 -07002128 leaderList.append( leaderN )
Jon Hall5cf14d52015-07-16 12:15:19 -07002129 # verify leader is ONOS1
2130 if leaderN == leader:
2131 # all is well
2132 # NOTE: In failure scenario, this could be a new node, maybe
2133 # check != ONOS1
2134 pass
2135 elif leaderN == main.FALSE:
2136 # error in response
2137 main.log.error( "Something is wrong with " +
2138 "electionTestLeader function, check the" +
2139 " error logs" )
2140 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002141 elif leaderN is None:
2142 main.log.error( cli.name +
2143                                 " shows no leader was elected for the" +
2144                                 " election app after the old one died" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002146 if len( set( leaderList ) ) != 1:
2147 leaderResult = main.FALSE
2148 main.log.error(
2149 "Inconsistent view of leader for the election test app" )
2150 # TODO: print the list
Jon Hall5cf14d52015-07-16 12:15:19 -07002151 utilities.assert_equals(
2152 expect=main.TRUE,
2153 actual=leaderResult,
2154 onpass="Leadership election passed",
2155 onfail="Something went wrong with Leadership election" )
2156
2157 def CASE8( self, main ):
2158 """
2159 Compare topo
2160 """
2161 import json
2162 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002163 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002164 assert main, "main not defined"
2165 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002166 assert main.CLIs, "main.CLIs not defined"
2167 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002168
2169 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002170 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002171 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002172 topoResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002173         topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002174 elapsed = 0
2175 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002176 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002177 startTime = time.time()
2178 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002179 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002180 devicesResults = main.TRUE
2181 linksResults = main.TRUE
2182 hostsResults = main.TRUE
2183 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002184 count += 1
2185 cliStart = time.time()
2186 devices = []
2187 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002188 for i in main.activeNodes:
2189 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002190 name="devices-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002191 args=[ main.CLIs[i].devices, [ None ] ],
2192 kwargs= { 'sleep': 5, 'attempts': 5,
2193 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002194 threads.append( t )
2195 t.start()
2196
2197 for t in threads:
2198 t.join()
2199 devices.append( t.result )
2200 hosts = []
2201 ipResult = main.TRUE
2202 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002203 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002204 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002205 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002206 args=[ main.CLIs[i].hosts, [ None ] ],
2207 kwargs= { 'sleep': 5, 'attempts': 5,
2208 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002209 threads.append( t )
2210 t.start()
2211
2212 for t in threads:
2213 t.join()
2214 try:
2215 hosts.append( json.loads( t.result ) )
2216 except ( ValueError, TypeError ):
2217 main.log.exception( "Error parsing hosts results" )
2218 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002219 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002220 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002221 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002222 if hosts[ controller ]:
2223 for host in hosts[ controller ]:
2224 if host is None or host.get( 'ipAddresses', [] ) == []:
2225 main.log.error(
2226 "Error with host ipAddresses on controller" +
2227 controllerStr + ": " + str( host ) )
2228 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002229 ports = []
2230 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002231 for i in main.activeNodes:
2232 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002233 name="ports-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002234 args=[ main.CLIs[i].ports, [ None ] ],
2235 kwargs= { 'sleep': 5, 'attempts': 5,
2236 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002237 threads.append( t )
2238 t.start()
2239
2240 for t in threads:
2241 t.join()
2242 ports.append( t.result )
2243 links = []
2244 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002245 for i in main.activeNodes:
2246 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002247 name="links-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002248 args=[ main.CLIs[i].links, [ None ] ],
2249 kwargs= { 'sleep': 5, 'attempts': 5,
2250 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002251 threads.append( t )
2252 t.start()
2253
2254 for t in threads:
2255 t.join()
2256 links.append( t.result )
2257 clusters = []
2258 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002259 for i in main.activeNodes:
2260 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002261 name="clusters-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002262 args=[ main.CLIs[i].clusters, [ None ] ],
2263 kwargs= { 'sleep': 5, 'attempts': 5,
2264 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002265 threads.append( t )
2266 t.start()
2267
2268 for t in threads:
2269 t.join()
2270 clusters.append( t.result )
2271
2272 elapsed = time.time() - startTime
2273 cliTime = time.time() - cliStart
2274 print "Elapsed time: " + str( elapsed )
2275 print "CLI time: " + str( cliTime )
2276
Jon Halla440e872016-03-31 15:15:50 -07002277 if all( e is None for e in devices ) and\
2278 all( e is None for e in hosts ) and\
2279 all( e is None for e in ports ) and\
2280 all( e is None for e in links ) and\
2281 all( e is None for e in clusters ):
2282 topoFailMsg = "Could not get topology from ONOS"
2283 main.log.error( topoFailMsg )
2284 continue # Try again, No use trying to compare
2285
Jon Hall5cf14d52015-07-16 12:15:19 -07002286 mnSwitches = main.Mininet1.getSwitches()
2287 mnLinks = main.Mininet1.getLinks()
2288 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07002289 for controller in range( len( main.activeNodes ) ):
2290 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002291 if devices[ controller ] and ports[ controller ] and\
2292 "Error" not in devices[ controller ] and\
2293 "Error" not in ports[ controller ]:
2294
Jon Hallc6793552016-01-19 14:18:37 -08002295 try:
2296 currentDevicesResult = main.Mininet1.compareSwitches(
2297 mnSwitches,
2298 json.loads( devices[ controller ] ),
2299 json.loads( ports[ controller ] ) )
2300 except ( TypeError, ValueError ) as e:
2301 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2302 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002303 else:
2304 currentDevicesResult = main.FALSE
2305 utilities.assert_equals( expect=main.TRUE,
2306 actual=currentDevicesResult,
2307 onpass="ONOS" + controllerStr +
2308 " Switches view is correct",
2309 onfail="ONOS" + controllerStr +
2310 " Switches view is incorrect" )
2311
2312 if links[ controller ] and "Error" not in links[ controller ]:
2313 currentLinksResult = main.Mininet1.compareLinks(
2314 mnSwitches, mnLinks,
2315 json.loads( links[ controller ] ) )
2316 else:
2317 currentLinksResult = main.FALSE
2318 utilities.assert_equals( expect=main.TRUE,
2319 actual=currentLinksResult,
2320 onpass="ONOS" + controllerStr +
2321 " links view is correct",
2322 onfail="ONOS" + controllerStr +
2323 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002324 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002325 currentHostsResult = main.Mininet1.compareHosts(
2326 mnHosts,
2327 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002328 elif hosts[ controller ] == []:
2329 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002330 else:
2331 currentHostsResult = main.FALSE
2332 utilities.assert_equals( expect=main.TRUE,
2333 actual=currentHostsResult,
2334 onpass="ONOS" + controllerStr +
2335 " hosts exist in Mininet",
2336 onfail="ONOS" + controllerStr +
2337 " hosts don't match Mininet" )
2338 # CHECKING HOST ATTACHMENT POINTS
2339 hostAttachment = True
2340 zeroHosts = False
2341 # FIXME: topo-HA/obelisk specific mappings:
2342 # key is mac and value is dpid
2343 mappings = {}
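                # In the obelisk topology used here, host i is expected to attach to
                # the device mapped below on port 1; see the port/device checks further down.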
2344 for i in range( 1, 29 ): # hosts 1 through 28
2345 # set up correct variables:
2346 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2347 if i == 1:
2348 deviceId = "1000".zfill(16)
2349 elif i == 2:
2350 deviceId = "2000".zfill(16)
2351 elif i == 3:
2352 deviceId = "3000".zfill(16)
2353 elif i == 4:
2354 deviceId = "3004".zfill(16)
2355 elif i == 5:
2356 deviceId = "5000".zfill(16)
2357 elif i == 6:
2358 deviceId = "6000".zfill(16)
2359 elif i == 7:
2360 deviceId = "6007".zfill(16)
2361 elif i >= 8 and i <= 17:
2362 dpid = '3' + str( i ).zfill( 3 )
2363 deviceId = dpid.zfill(16)
2364 elif i >= 18 and i <= 27:
2365 dpid = '6' + str( i ).zfill( 3 )
2366 deviceId = dpid.zfill(16)
2367 elif i == 28:
2368 deviceId = "2800".zfill(16)
2369 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002370 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002371 if hosts[ controller ] == []:
2372 main.log.warn( "There are no hosts discovered" )
2373 zeroHosts = True
2374 else:
2375 for host in hosts[ controller ]:
2376 mac = None
2377 location = None
2378 device = None
2379 port = None
2380 try:
2381 mac = host.get( 'mac' )
2382 assert mac, "mac field could not be found for this host object"
2383
2384 location = host.get( 'location' )
2385 assert location, "location field could not be found for this host object"
2386
2387 # Trim the protocol identifier off deviceId
2388 device = str( location.get( 'elementId' ) ).split(':')[1]
2389 assert device, "elementId field could not be found for this host location object"
2390
2391 port = location.get( 'port' )
2392 assert port, "port field could not be found for this host location object"
2393
2394 # Now check if this matches where they should be
2395 if mac and device and port:
2396 if str( port ) != "1":
2397 main.log.error( "The attachment port is incorrect for " +
2398 "host " + str( mac ) +
2399 ". Expected: 1 Actual: " + str( port) )
2400 hostAttachment = False
2401 if device != mappings[ str( mac ) ]:
2402 main.log.error( "The attachment device is incorrect for " +
2403 "host " + str( mac ) +
2404 ". Expected: " + mappings[ str( mac ) ] +
2405 " Actual: " + device )
2406 hostAttachment = False
2407 else:
2408 hostAttachment = False
2409 except AssertionError:
2410 main.log.exception( "Json object not as expected" )
2411 main.log.error( repr( host ) )
2412 hostAttachment = False
2413 else:
2414 main.log.error( "No hosts json output or \"Error\"" +
2415 " in output. hosts = " +
2416 repr( hosts[ controller ] ) )
2417 if zeroHosts is False:
2418 hostAttachment = True
2419
2420 # END CHECKING HOST ATTACHMENT POINTS
2421 devicesResults = devicesResults and currentDevicesResult
2422 linksResults = linksResults and currentLinksResult
2423 hostsResults = hostsResults and currentHostsResult
2424 hostAttachmentResults = hostAttachmentResults and\
2425 hostAttachment
2426 topoResult = ( devicesResults and linksResults
2427 and hostsResults and ipResult and
2428 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002429 utilities.assert_equals( expect=True,
2430 actual=topoResult,
2431 onpass="ONOS topology matches Mininet",
Jon Halla440e872016-03-31 15:15:50 -07002432 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002433 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002434
2435 # Compare json objects for hosts and dataplane clusters
2436
2437 # hosts
2438 main.step( "Hosts view is consistent across all ONOS nodes" )
2439 consistentHostsResult = main.TRUE
2440 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002441 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002442 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002443 if hosts[ controller ] == hosts[ 0 ]:
2444 continue
2445 else: # hosts not consistent
2446 main.log.error( "hosts from ONOS" + controllerStr +
2447                                          " are inconsistent with ONOS1" )
2448 main.log.warn( repr( hosts[ controller ] ) )
2449 consistentHostsResult = main.FALSE
2450
2451 else:
2452 main.log.error( "Error in getting ONOS hosts from ONOS" +
2453 controllerStr )
2454 consistentHostsResult = main.FALSE
2455 main.log.warn( "ONOS" + controllerStr +
2456 " hosts response: " +
2457 repr( hosts[ controller ] ) )
2458 utilities.assert_equals(
2459 expect=main.TRUE,
2460 actual=consistentHostsResult,
2461 onpass="Hosts view is consistent across all ONOS nodes",
2462 onfail="ONOS nodes have different views of hosts" )
2463
2464 main.step( "Hosts information is correct" )
2465 hostsResults = hostsResults and ipResult
2466 utilities.assert_equals(
2467 expect=main.TRUE,
2468 actual=hostsResults,
2469 onpass="Host information is correct",
2470 onfail="Host information is incorrect" )
2471
2472 main.step( "Host attachment points to the network" )
2473 utilities.assert_equals(
2474 expect=True,
2475 actual=hostAttachmentResults,
2476 onpass="Hosts are correctly attached to the network",
2477 onfail="ONOS did not correctly attach hosts to the network" )
2478
2479 # Strongly connected clusters of devices
2480 main.step( "Clusters view is consistent across all ONOS nodes" )
2481 consistentClustersResult = main.TRUE
2482 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07002483 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002484 if "Error" not in clusters[ controller ]:
2485 if clusters[ controller ] == clusters[ 0 ]:
2486 continue
2487 else: # clusters not consistent
2488 main.log.error( "clusters from ONOS" +
2489 controllerStr +
2490                                          " are inconsistent with ONOS1" )
2491 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002492 else:
2493 main.log.error( "Error in getting dataplane clusters " +
2494 "from ONOS" + controllerStr )
2495 consistentClustersResult = main.FALSE
2496 main.log.warn( "ONOS" + controllerStr +
2497 " clusters response: " +
2498 repr( clusters[ controller ] ) )
2499 utilities.assert_equals(
2500 expect=main.TRUE,
2501 actual=consistentClustersResult,
2502 onpass="Clusters view is consistent across all ONOS nodes",
2503 onfail="ONOS nodes have different views of clusters" )
2504
2505 main.step( "There is only one SCC" )
2506 # there should always only be one cluster
2507 try:
2508 numClusters = len( json.loads( clusters[ 0 ] ) )
2509 except ( ValueError, TypeError ):
2510 main.log.exception( "Error parsing clusters[0]: " +
2511 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002512 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002513 clusterResults = main.FALSE
2514 if numClusters == 1:
2515 clusterResults = main.TRUE
2516 utilities.assert_equals(
2517 expect=1,
2518 actual=numClusters,
2519 onpass="ONOS shows 1 SCC",
2520 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2521
2522 topoResult = ( devicesResults and linksResults
2523 and hostsResults and consistentHostsResult
2524 and consistentClustersResult and clusterResults
2525 and ipResult and hostAttachmentResults )
2526
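        # Also require that the topology converged within two polling attempts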
2527 topoResult = topoResult and int( count <= 2 )
2528 note = "note it takes about " + str( int( cliTime ) ) + \
2529 " seconds for the test to make all the cli calls to fetch " +\
2530 "the topology from each ONOS instance"
2531 main.log.info(
2532             "Very rough estimate for topology discovery/convergence( " +
2533 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2534 str( count ) + " tries" )
2535
2536 main.step( "Device information is correct" )
2537 utilities.assert_equals(
2538 expect=main.TRUE,
2539 actual=devicesResults,
2540 onpass="Device information is correct",
2541 onfail="Device information is incorrect" )
2542
2543 main.step( "Links are correct" )
2544 utilities.assert_equals(
2545 expect=main.TRUE,
2546 actual=linksResults,
2547             onpass="Links are correct",
2548 onfail="Links are incorrect" )
2549
2550 main.step( "Hosts are correct" )
2551 utilities.assert_equals(
2552 expect=main.TRUE,
2553 actual=hostsResults,
2554 onpass="Hosts are correct",
2555 onfail="Hosts are incorrect" )
2556
2557 # FIXME: move this to an ONOS state case
2558 main.step( "Checking ONOS nodes" )
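        # Verify that every active ONOS node reports all cluster members as READY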
2559 nodesOutput = []
2560 nodeResults = main.TRUE
2561 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002562 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002563 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002564 name="nodes-" + str( i ),
2565 args=[ ] )
2566 threads.append( t )
2567 t.start()
2568
2569 for t in threads:
2570 t.join()
2571 nodesOutput.append( t.result )
Jon Halla440e872016-03-31 15:15:50 -07002572 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002573 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002574 for i in nodesOutput:
2575 try:
2576 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002577 activeIps = []
2578 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002579 for node in current:
Jon Hallbd182782016-03-28 16:42:22 -07002580 if node['state'] == 'READY':
Jon Halle9b1fa32015-12-08 15:32:21 -08002581 activeIps.append( node['ip'] )
2582 activeIps.sort()
2583 if ips == activeIps:
2584 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002585 except ( ValueError, TypeError ):
2586 main.log.error( "Error parsing nodes output" )
2587 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002588 currentResult = main.FALSE
2589 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002590 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2591 onpass="Nodes check successful",
2592 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002593 if not nodeResults:
2594 for cli in main.CLIs:
2595 main.log.debug( "{} components not ACTIVE: \n{}".format(
2596 cli.name,
2597 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002598
2599 def CASE9( self, main ):
2600 """
2601 Link s3-s28 down
2602 """
2603 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002604 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002605 assert main, "main not defined"
2606 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002607 assert main.CLIs, "main.CLIs not defined"
2608 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002609 # NOTE: You should probably run a topology check after this
2610
2611 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2612
2613 description = "Turn off a link to ensure that Link Discovery " +\
2614 "is working properly"
2615 main.case( description )
2616
2617 main.step( "Kill Link between s3 and s28" )
2618 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2619 main.log.info( "Waiting " + str( linkSleep ) +
2620 " seconds for link down to be discovered" )
2621 time.sleep( linkSleep )
2622 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2623 onpass="Link down successful",
2624 onfail="Failed to bring link down" )
2625 # TODO do some sort of check here
2626
2627 def CASE10( self, main ):
2628 """
2629 Link s3-s28 up
2630 """
2631 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002632 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002633 assert main, "main not defined"
2634 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002635 assert main.CLIs, "main.CLIs not defined"
2636 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002637 # NOTE: You should probably run a topology check after this
2638
2639 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2640
2641 description = "Restore a link to ensure that Link Discovery is " + \
2642 "working properly"
2643 main.case( description )
2644
2645 main.step( "Bring link between s3 and s28 back up" )
2646 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2647 main.log.info( "Waiting " + str( linkSleep ) +
2648 " seconds for link up to be discovered" )
2649 time.sleep( linkSleep )
2650 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2651 onpass="Link up successful",
2652 onfail="Failed to bring link up" )
2653 # TODO do some sort of check here
2654
2655 def CASE11( self, main ):
2656 """
2657 Switch Down
2658 """
2659 # NOTE: You should probably run a topology check after this
2660 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002661 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002662 assert main, "main not defined"
2663 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002664 assert main.CLIs, "main.CLIs not defined"
2665 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002666
2667 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2668
2669 description = "Killing a switch to ensure it is discovered correctly"
Jon Halla440e872016-03-31 15:15:50 -07002670 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002671 main.case( description )
2672 switch = main.params[ 'kill' ][ 'switch' ]
2673 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2674
2675 # TODO: Make this switch parameterizable
2676 main.step( "Kill " + switch )
2677 main.log.info( "Deleting " + switch )
2678 main.Mininet1.delSwitch( switch )
2679 main.log.info( "Waiting " + str( switchSleep ) +
2680 " seconds for switch down to be discovered" )
2681 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002682 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002683 # Peek at the deleted switch
2684 main.log.warn( str( device ) )
2685 result = main.FALSE
2686 if device and device[ 'available' ] is False:
2687 result = main.TRUE
2688 utilities.assert_equals( expect=main.TRUE, actual=result,
2689 onpass="Kill switch successful",
2690 onfail="Failed to kill switch?" )
2691
2692 def CASE12( self, main ):
2693 """
2694 Switch Up
2695 """
2696 # NOTE: You should probably run a topology check after this
2697 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002698 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002699 assert main, "main not defined"
2700 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002701 assert main.CLIs, "main.CLIs not defined"
2702 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002703 assert ONOS1Port, "ONOS1Port not defined"
2704 assert ONOS2Port, "ONOS2Port not defined"
2705 assert ONOS3Port, "ONOS3Port not defined"
2706 assert ONOS4Port, "ONOS4Port not defined"
2707 assert ONOS5Port, "ONOS5Port not defined"
2708 assert ONOS6Port, "ONOS6Port not defined"
2709 assert ONOS7Port, "ONOS7Port not defined"
2710
2711 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2712 switch = main.params[ 'kill' ][ 'switch' ]
2713 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2714 links = main.params[ 'kill' ][ 'links' ].split()
Jon Halla440e872016-03-31 15:15:50 -07002715 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002716 description = "Adding a switch to ensure it is discovered correctly"
2717 main.case( description )
2718
2719 main.step( "Add back " + switch )
2720 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2721 for peer in links:
2722 main.Mininet1.addLink( switch, peer )
Jon Halla440e872016-03-31 15:15:50 -07002723 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002724 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2725 main.log.info( "Waiting " + str( switchSleep ) +
2726 " seconds for switch up to be discovered" )
2727 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002728 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002729 # Peek at the deleted switch
2730 main.log.warn( str( device ) )
2731 result = main.FALSE
2732 if device and device[ 'available' ]:
2733 result = main.TRUE
2734 utilities.assert_equals( expect=main.TRUE, actual=result,
2735 onpass="add switch successful",
2736 onfail="Failed to add switch?" )
2737
2738 def CASE13( self, main ):
2739 """
2740 Clean up
2741 """
2742 import os
2743 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002744 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002745 assert main, "main not defined"
2746 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002747 assert main.CLIs, "main.CLIs not defined"
2748 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002749
2750 # printing colors to terminal
2751 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2752 'blue': '\033[94m', 'green': '\033[92m',
2753 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2754 main.case( "Test Cleanup" )
2755 main.step( "Killing tcpdumps" )
2756 main.Mininet2.stopTcpdump()
2757
2758 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002759 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002760 main.step( "Copying MN pcap and ONOS log files to test station" )
2761 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2762 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002763 # NOTE: MN Pcap file is being saved to logdir.
2764 # We scp this file as MN and TestON aren't necessarily the same vm
2765
2766 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002767 # TODO: Load these from params
2768 # NOTE: must end in /
2769 logFolder = "/opt/onos/log/"
2770 logFiles = [ "karaf.log", "karaf.log.1" ]
2771 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002772 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002773 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002774 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002775 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2776 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002777 # std*.log's
2778 # NOTE: must end in /
2779 logFolder = "/opt/onos/var/"
2780 logFiles = [ "stderr.log", "stdout.log" ]
2781 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002782 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002783 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002784 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002785 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2786 logFolder + f, dstName )
2787 else:
2788 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002789
2790 main.step( "Stopping Mininet" )
2791 mnResult = main.Mininet1.stopNet()
2792 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2793 onpass="Mininet stopped",
2794 onfail="MN cleanup NOT successful" )
2795
2796 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002797 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002798 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2799 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002800
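        # Write the recorded gossip-intent time to Timers.csv. gossipTime is
        # expected to be set earlier in the test; if it was not, the NameError
        # below is simply logged.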
2801 try:
2802            timerLog = open( main.logdir + "/Timers.csv", 'w' )
2803            # Overwrite the file with the timer labels and values, then close it
2804 labels = "Gossip Intents"
2805 data = str( gossipTime )
2806 timerLog.write( labels + "\n" + data )
2807 timerLog.close()
2808 except NameError, e:
2809 main.log.exception(e)
2810
2811 def CASE14( self, main ):
2812 """
2813 start election app on all onos nodes
2814 """
Jon Halle1a3b752015-07-22 13:02:46 -07002815 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002816 assert main, "main not defined"
2817 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002818 assert main.CLIs, "main.CLIs not defined"
2819 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002820
2821        main.case( "Start Leadership Election app" )
2822 main.step( "Install leadership election app" )
Jon Halla440e872016-03-31 15:15:50 -07002823 onosCli = main.CLIs[ main.activeNodes[0] ]
2824 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002825 utilities.assert_equals(
2826 expect=main.TRUE,
2827 actual=appResult,
2828 onpass="Election app installed",
2829 onfail="Something went wrong with installing Leadership election" )
2830
2831 main.step( "Run for election on each node" )
2832 leaderResult = main.TRUE
2833 leaders = []
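        # Every active node joins the election, then each node is asked who it
        # thinks the leader is and the answers are collected for comparison.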
Jon Halla440e872016-03-31 15:15:50 -07002834 for i in main.activeNodes:
2835 main.CLIs[i].electionTestRun()
2836 for i in main.activeNodes:
2837 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002838 leader = cli.electionTestLeader()
2839 if leader is None or leader == main.FALSE:
2840 main.log.error( cli.name + ": Leader for the election app " +
2841 "should be an ONOS node, instead got '" +
2842 str( leader ) + "'" )
2843 leaderResult = main.FALSE
2844 leaders.append( leader )
2845 utilities.assert_equals(
2846 expect=main.TRUE,
2847 actual=leaderResult,
2848 onpass="Successfully ran for leadership",
2849 onfail="Failed to run for leadership" )
2850
2851 main.step( "Check that each node shows the same leader" )
2852 sameLeader = main.TRUE
2853 if len( set( leaders ) ) != 1:
2854 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002855            main.log.error( "Results of electionTestLeader in order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002856 str( leaders ) )
2857 utilities.assert_equals(
2858 expect=main.TRUE,
2859 actual=sameLeader,
2860 onpass="Leadership is consistent for the election topic",
2861 onfail="Nodes have different leaders" )
2862
2863 def CASE15( self, main ):
2864 """
2865 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002866 15.1 Run election on each node
2867 15.2 Check that each node has the same leaders and candidates
2868 15.3 Find current leader and withdraw
2869 15.4 Check that a new node was elected leader
2870        15.5 Check that the new leader was the candidate of the old leader
2871 15.6 Run for election on old leader
2872 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2873 15.8 Make sure that the old leader was added to the candidate list
2874
2875        old and new variable prefixes refer to data from before vs. after
2876        the withdrawal, and later to before the withdrawal vs. after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002877 """
2878 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002879 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002880 assert main, "main not defined"
2881 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002882 assert main.CLIs, "main.CLIs not defined"
2883 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002884
Jon Halla440e872016-03-31 15:15:50 -07002885 description = "Check that Leadership Election is still functional"
Jon Hall5cf14d52015-07-16 12:15:19 -07002886 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002887        # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002888
Jon Halla440e872016-03-31 15:15:50 -07002889        oldLeaders = []  # list of lists of each node's candidates before
2890        newLeaders = []  # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002891        oldLeader = ''  # the old leader from oldLeaders, None if not same
2892        newLeader = ''  # the new leader from newLeaders, None if not same
2893 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2894 expectNoLeader = False # True when there is only one leader
2895 if main.numCtrls == 1:
2896 expectNoLeader = True
2897
2898 main.step( "Run for election on each node" )
2899 electionResult = main.TRUE
2900
Jon Halla440e872016-03-31 15:15:50 -07002901 for i in main.activeNodes: # run test election on each node
2902 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002903 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002904 utilities.assert_equals(
2905 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002906 actual=electionResult,
2907 onpass="All nodes successfully ran for leadership",
2908 onfail="At least one node failed to run for leadership" )
2909
acsmars3a72bde2015-09-02 14:16:22 -07002910 if electionResult == main.FALSE:
2911 main.log.error(
Jon Halla440e872016-03-31 15:15:50 -07002912 "Skipping Test Case because Election Test App isn't loaded" )
acsmars3a72bde2015-09-02 14:16:22 -07002913 main.skipCase()
2914
acsmars71adceb2015-08-31 15:09:26 -07002915 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002916 failMessage = "Nodes have different leaderboards"
2917 def consistentLeaderboards( nodes ):
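            # Poll each given CLI for the election-topic leaderboard, retrying
            # up to 5 times until every node reports an identical board.
            # Returns a ( allMatch, listOfLeaderboards ) tuple.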
2918 TOPIC = 'org.onosproject.election'
2919 # FIXME: use threads
2920 #FIXME: should we retry outside the function?
2921 for n in range( 5 ): # Retry in case election is still happening
2922 leaderList = []
2923 # Get all leaderboards
2924 for cli in nodes:
2925 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
2926 # Compare leaderboards
2927                result = all( i == leaderList[0] for i in leaderList ) and\
2928                         None not in leaderList
2929 main.log.debug( leaderList )
2930 main.log.warn( result )
2931 if result:
2932 return ( result, leaderList )
2933                time.sleep( 5 )  # TODO: parameterize
2934            main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
            return ( result, leaderList )  # Return the last attempt so callers can still unpack the result
2935 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2936 sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
2937 if sameResult:
2938 oldLeader = oldLeaders[ 0 ][ 0 ]
2939 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002940 else:
Jon Halla440e872016-03-31 15:15:50 -07002941 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002942 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002943 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002944 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002945 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002946 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002947
2948 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002949 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002950 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002951 if oldLeader is None:
2952 main.log.error( "Leadership isn't consistent." )
2953 withdrawResult = main.FALSE
2954 # Get the CLI of the oldLeader
Jon Halla440e872016-03-31 15:15:50 -07002955 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002956 if oldLeader == main.nodes[ i ].ip_address:
2957 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002958 break
2959 else: # FOR/ELSE statement
2960 main.log.error( "Leader election, could not find current leader" )
2961 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002962 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002963 utilities.assert_equals(
2964 expect=main.TRUE,
2965 actual=withdrawResult,
2966 onpass="Node was withdrawn from election",
2967 onfail="Node was not withdrawn from election" )
2968
acsmars71adceb2015-08-31 15:09:26 -07002969 main.step( "Check that a new node was elected leader" )
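        # A leader of 'none' is only acceptable when running a single node
        # ( expectNoLeader ); otherwise another node must have taken over and
        # it must not be the node that was just withdrawn.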
acsmars71adceb2015-08-31 15:09:26 -07002970 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07002971 # Get new leaders and candidates
Jon Halla440e872016-03-31 15:15:50 -07002972 newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
2973 if newLeaders[ 0 ][ 0 ] == 'none':
2974 main.log.error( "No leader was elected on at least 1 node" )
2975 if not expectNoLeader:
2976 newLeaderResult = False
2977 if newLeaderResult:
2978 newLeader = newLeaders[ 0 ][ 0 ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002979 else:
Jon Halla440e872016-03-31 15:15:50 -07002980 newLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002981
2982 # Check that the new leader is not the older leader, which was withdrawn
2983 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07002984 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08002985 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07002986 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002987 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002988 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002989 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002990 onpass="Leadership election passed",
2991 onfail="Something went wrong with Leadership election" )
2992
Jon Halla440e872016-03-31 15:15:50 -07002993 main.step( "Check that that new leader was the candidate of old leader" )
2994        # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07002995 correctCandidateResult = main.TRUE
2996 if expectNoLeader:
2997 if newLeader == 'none':
2998 main.log.info( "No leader expected. None found. Pass" )
2999 correctCandidateResult = main.TRUE
3000 else:
3001 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3002 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003003 elif len( oldLeaders[0] ) >= 3:
3004 if newLeader == oldLeaders[ 0 ][ 2 ]:
3005 # correct leader was elected
3006 correctCandidateResult = main.TRUE
3007 else:
3008 correctCandidateResult = main.FALSE
3009 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3010 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3011 else:
3012 main.log.warn( "Could not determine who should be the correct leader" )
3013 main.log.debug( oldLeaders[ 0 ] )
acsmars71adceb2015-08-31 15:09:26 -07003014 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003015 utilities.assert_equals(
3016 expect=main.TRUE,
3017 actual=correctCandidateResult,
3018 onpass="Correct Candidate Elected",
3019 onfail="Incorrect Candidate Elected" )
3020
Jon Hall5cf14d52015-07-16 12:15:19 -07003021        main.step( "Run for election on old leader ( just so everyone " +
3022                   "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003023 if oldLeaderCLI is not None:
3024 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003025 else:
acsmars71adceb2015-08-31 15:09:26 -07003026 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003027 runResult = main.FALSE
3028 utilities.assert_equals(
3029 expect=main.TRUE,
3030 actual=runResult,
3031 onpass="App re-ran for election",
3032 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003033
acsmars71adceb2015-08-31 15:09:26 -07003034 main.step(
3035 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003036 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003037 # Get new leaders and candidates
3038 reRunLeaders = []
3039        time.sleep( 5 )  # Parameterize
3040 positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003041
3042 # Check that the re-elected node is last on the candidate List
Jon Halla440e872016-03-31 15:15:50 -07003043 if oldLeader != reRunLeaders[ 0 ][ -1 ]:
3044            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3045 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003046 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003047
3048 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003049 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003050 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003051 onpass="Old leader successfully re-ran for election",
3052 onfail="Something went wrong with Leadership election after " +
3053 "the old leader re-ran for election" )
3054
3055 def CASE16( self, main ):
3056 """
3057 Install Distributed Primitives app
3058 """
3059 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003060 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003061 assert main, "main not defined"
3062 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003063 assert main.CLIs, "main.CLIs not defined"
3064 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003065
3066 # Variables for the distributed primitives tests
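        # These globals mirror the expected ONOS-side state ( counter value and
        # set contents ) so that CASE17 can check each operation against them.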
3067 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003068 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003069 global onosSet
3070 global onosSetName
3071 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003072 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003073 onosSet = set([])
3074 onosSetName = "TestON-set"
3075
3076 description = "Install Primitives app"
3077 main.case( description )
3078 main.step( "Install Primitives app" )
3079 appName = "org.onosproject.distributedprimitives"
Jon Halla440e872016-03-31 15:15:50 -07003080 node = main.activeNodes[0]
3081 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003082 utilities.assert_equals( expect=main.TRUE,
3083 actual=appResults,
3084 onpass="Primitives app activated",
3085 onfail="Primitives app not activated" )
3086 time.sleep( 5 ) # To allow all nodes to activate
3087
3088 def CASE17( self, main ):
3089 """
3090 Check for basic functionality with distributed primitives
3091 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003092 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003093 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003094 assert main, "main not defined"
3095 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003096 assert main.CLIs, "main.CLIs not defined"
3097 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003098 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003099 assert onosSetName, "onosSetName not defined"
3100 # NOTE: assert fails if value is 0/None/Empty/False
3101 try:
3102 pCounterValue
3103 except NameError:
3104 main.log.error( "pCounterValue not defined, setting to 0" )
3105 pCounterValue = 0
3106 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003107 onosSet
3108 except NameError:
3109 main.log.error( "onosSet not defined, setting to empty Set" )
3110 onosSet = set([])
3111 # Variables for the distributed primitives tests. These are local only
3112 addValue = "a"
3113 addAllValue = "a b c d e f"
3114 retainValue = "c d e f"
3115
3116 description = "Check for basic functionality with distributed " +\
3117 "primitives"
3118 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003119 main.caseExplanation = "Test the methods of the distributed " +\
3120                              "primitives (counters and sets) through the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003121 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003122 # Partitioned counters
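        # Each active node increments the counter in its own thread. The
        # expected intermediate values are collected in addedPValues and the
        # step passes only if every expected value appears in the returned
        # results, i.e. no increment was lost.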
3123 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003124 pCounters = []
3125 threads = []
3126 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003127 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003128 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3129 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003130 args=[ pCounterName ] )
3131 pCounterValue += 1
3132 addedPValues.append( pCounterValue )
3133 threads.append( t )
3134 t.start()
3135
3136 for t in threads:
3137 t.join()
3138 pCounters.append( t.result )
3139 # Check that counter incremented numController times
3140 pCounterResults = True
3141 for i in addedPValues:
3142 tmpResult = i in pCounters
3143 pCounterResults = pCounterResults and tmpResult
3144 if not tmpResult:
3145 main.log.error( str( i ) + " is not in partitioned "
3146 "counter incremented results" )
3147 utilities.assert_equals( expect=True,
3148 actual=pCounterResults,
3149 onpass="Default counter incremented",
3150 onfail="Error incrementing default" +
3151 " counter" )
3152
Jon Halle1a3b752015-07-22 13:02:46 -07003153 main.step( "Get then Increment a default counter on each node" )
3154 pCounters = []
3155 threads = []
3156 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003157 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003158 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3159 name="counterGetAndAdd-" + str( i ),
3160 args=[ pCounterName ] )
3161 addedPValues.append( pCounterValue )
3162 pCounterValue += 1
3163 threads.append( t )
3164 t.start()
3165
3166 for t in threads:
3167 t.join()
3168 pCounters.append( t.result )
3169 # Check that counter incremented numController times
3170 pCounterResults = True
3171 for i in addedPValues:
3172 tmpResult = i in pCounters
3173 pCounterResults = pCounterResults and tmpResult
3174 if not tmpResult:
3175 main.log.error( str( i ) + " is not in partitioned "
3176 "counter incremented results" )
3177 utilities.assert_equals( expect=True,
3178 actual=pCounterResults,
3179 onpass="Default counter incremented",
3180 onfail="Error incrementing default" +
3181 " counter" )
3182
3183 main.step( "Counters we added have the correct values" )
3184 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3185 utilities.assert_equals( expect=main.TRUE,
3186 actual=incrementCheck,
3187 onpass="Added counters are correct",
3188 onfail="Added counters are incorrect" )
3189
3190 main.step( "Add -8 to then get a default counter on each node" )
3191 pCounters = []
3192 threads = []
3193 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003194 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003195 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3196 name="counterIncrement-" + str( i ),
3197 args=[ pCounterName ],
3198 kwargs={ "delta": -8 } )
3199 pCounterValue += -8
3200 addedPValues.append( pCounterValue )
3201 threads.append( t )
3202 t.start()
3203
3204 for t in threads:
3205 t.join()
3206 pCounters.append( t.result )
3207 # Check that counter incremented numController times
3208 pCounterResults = True
3209 for i in addedPValues:
3210 tmpResult = i in pCounters
3211 pCounterResults = pCounterResults and tmpResult
3212 if not tmpResult:
3213 main.log.error( str( i ) + " is not in partitioned "
3214 "counter incremented results" )
3215 utilities.assert_equals( expect=True,
3216 actual=pCounterResults,
3217 onpass="Default counter incremented",
3218 onfail="Error incrementing default" +
3219 " counter" )
3220
3221 main.step( "Add 5 to then get a default counter on each node" )
3222 pCounters = []
3223 threads = []
3224 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003225 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003226 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3227 name="counterIncrement-" + str( i ),
3228 args=[ pCounterName ],
3229 kwargs={ "delta": 5 } )
3230 pCounterValue += 5
3231 addedPValues.append( pCounterValue )
3232 threads.append( t )
3233 t.start()
3234
3235 for t in threads:
3236 t.join()
3237 pCounters.append( t.result )
3238 # Check that counter incremented numController times
3239 pCounterResults = True
3240 for i in addedPValues:
3241 tmpResult = i in pCounters
3242 pCounterResults = pCounterResults and tmpResult
3243 if not tmpResult:
3244 main.log.error( str( i ) + " is not in partitioned "
3245 "counter incremented results" )
3246 utilities.assert_equals( expect=True,
3247 actual=pCounterResults,
3248 onpass="Default counter incremented",
3249 onfail="Error incrementing default" +
3250 " counter" )
3251
3252 main.step( "Get then add 5 to a default counter on each node" )
3253 pCounters = []
3254 threads = []
3255 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003256 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003257 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3258 name="counterIncrement-" + str( i ),
3259 args=[ pCounterName ],
3260 kwargs={ "delta": 5 } )
3261 addedPValues.append( pCounterValue )
3262 pCounterValue += 5
3263 threads.append( t )
3264 t.start()
3265
3266 for t in threads:
3267 t.join()
3268 pCounters.append( t.result )
3269 # Check that counter incremented numController times
3270 pCounterResults = True
3271 for i in addedPValues:
3272 tmpResult = i in pCounters
3273 pCounterResults = pCounterResults and tmpResult
3274 if not tmpResult:
3275 main.log.error( str( i ) + " is not in partitioned "
3276 "counter incremented results" )
3277 utilities.assert_equals( expect=True,
3278 actual=pCounterResults,
3279 onpass="Default counter incremented",
3280 onfail="Error incrementing default" +
3281 " counter" )
3282
3283 main.step( "Counters we added have the correct values" )
3284 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3285 utilities.assert_equals( expect=main.TRUE,
3286 actual=incrementCheck,
3287 onpass="Added counters are correct",
3288 onfail="Added counters are incorrect" )
3289
Jon Hall5cf14d52015-07-16 12:15:19 -07003290 # DISTRIBUTED SETS
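        # The local python set onosSet is the reference copy. For each set
        # operation below, the call is sent to every active node in parallel
        # threads, then each node's view ( setTestGet ) and reported size
        # ( setTestSize ) are compared against the reference.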
3291 main.step( "Distributed Set get" )
3292 size = len( onosSet )
3293 getResponses = []
3294 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003295 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003296 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003297 name="setTestGet-" + str( i ),
3298 args=[ onosSetName ] )
3299 threads.append( t )
3300 t.start()
3301 for t in threads:
3302 t.join()
3303 getResponses.append( t.result )
3304
3305 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003306 for i in range( len( main.activeNodes ) ):
3307 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003308 if isinstance( getResponses[ i ], list):
3309 current = set( getResponses[ i ] )
3310 if len( current ) == len( getResponses[ i ] ):
3311 # no repeats
3312 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003313 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003314 " has incorrect view" +
3315 " of set " + onosSetName + ":\n" +
3316 str( getResponses[ i ] ) )
3317 main.log.debug( "Expected: " + str( onosSet ) )
3318 main.log.debug( "Actual: " + str( current ) )
3319 getResults = main.FALSE
3320 else:
3321 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003322 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003323 " has repeat elements in" +
3324 " set " + onosSetName + ":\n" +
3325 str( getResponses[ i ] ) )
3326 getResults = main.FALSE
3327 elif getResponses[ i ] == main.ERROR:
3328 getResults = main.FALSE
3329 utilities.assert_equals( expect=main.TRUE,
3330 actual=getResults,
3331 onpass="Set elements are correct",
3332 onfail="Set elements are incorrect" )
3333
3334 main.step( "Distributed Set size" )
3335 sizeResponses = []
3336 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003337 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003338 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003339 name="setTestSize-" + str( i ),
3340 args=[ onosSetName ] )
3341 threads.append( t )
3342 t.start()
3343 for t in threads:
3344 t.join()
3345 sizeResponses.append( t.result )
3346
3347 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003348 for i in range( len( main.activeNodes ) ):
3349 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003350 if size != sizeResponses[ i ]:
3351 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003352 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003353 " expected a size of " + str( size ) +
3354 " for set " + onosSetName +
3355 " but got " + str( sizeResponses[ i ] ) )
3356 utilities.assert_equals( expect=main.TRUE,
3357 actual=sizeResults,
3358 onpass="Set sizes are correct",
3359 onfail="Set sizes are incorrect" )
3360
3361 main.step( "Distributed Set add()" )
3362 onosSet.add( addValue )
3363 addResponses = []
3364 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003365 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003366 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003367 name="setTestAdd-" + str( i ),
3368 args=[ onosSetName, addValue ] )
3369 threads.append( t )
3370 t.start()
3371 for t in threads:
3372 t.join()
3373 addResponses.append( t.result )
3374
3375 # main.TRUE = successfully changed the set
3376 # main.FALSE = action resulted in no change in set
3377 # main.ERROR - Some error in executing the function
3378 addResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003379 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003380 if addResponses[ i ] == main.TRUE:
3381 # All is well
3382 pass
3383 elif addResponses[ i ] == main.FALSE:
3384 # Already in set, probably fine
3385 pass
3386 elif addResponses[ i ] == main.ERROR:
3387 # Error in execution
3388 addResults = main.FALSE
3389 else:
3390 # unexpected result
3391 addResults = main.FALSE
3392 if addResults != main.TRUE:
3393 main.log.error( "Error executing set add" )
3394
3395 # Check if set is still correct
3396 size = len( onosSet )
3397 getResponses = []
3398 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003399 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003400 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003401 name="setTestGet-" + str( i ),
3402 args=[ onosSetName ] )
3403 threads.append( t )
3404 t.start()
3405 for t in threads:
3406 t.join()
3407 getResponses.append( t.result )
3408 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003409 for i in range( len( main.activeNodes ) ):
3410 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003411 if isinstance( getResponses[ i ], list):
3412 current = set( getResponses[ i ] )
3413 if len( current ) == len( getResponses[ i ] ):
3414 # no repeats
3415 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003416 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003417 " of set " + onosSetName + ":\n" +
3418 str( getResponses[ i ] ) )
3419 main.log.debug( "Expected: " + str( onosSet ) )
3420 main.log.debug( "Actual: " + str( current ) )
3421 getResults = main.FALSE
3422 else:
3423 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003424 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003425 " set " + onosSetName + ":\n" +
3426 str( getResponses[ i ] ) )
3427 getResults = main.FALSE
3428 elif getResponses[ i ] == main.ERROR:
3429 getResults = main.FALSE
3430 sizeResponses = []
3431 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003432 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003433 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003434 name="setTestSize-" + str( i ),
3435 args=[ onosSetName ] )
3436 threads.append( t )
3437 t.start()
3438 for t in threads:
3439 t.join()
3440 sizeResponses.append( t.result )
3441 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003442 for i in range( len( main.activeNodes ) ):
3443 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003444 if size != sizeResponses[ i ]:
3445 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003446 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003447 " expected a size of " + str( size ) +
3448 " for set " + onosSetName +
3449 " but got " + str( sizeResponses[ i ] ) )
3450 addResults = addResults and getResults and sizeResults
3451 utilities.assert_equals( expect=main.TRUE,
3452 actual=addResults,
3453 onpass="Set add correct",
3454 onfail="Set add was incorrect" )
3455
3456 main.step( "Distributed Set addAll()" )
3457 onosSet.update( addAllValue.split() )
3458 addResponses = []
3459 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003460 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003461 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003462 name="setTestAddAll-" + str( i ),
3463 args=[ onosSetName, addAllValue ] )
3464 threads.append( t )
3465 t.start()
3466 for t in threads:
3467 t.join()
3468 addResponses.append( t.result )
3469
3470 # main.TRUE = successfully changed the set
3471 # main.FALSE = action resulted in no change in set
3472 # main.ERROR - Some error in executing the function
3473 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003474 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003475 if addResponses[ i ] == main.TRUE:
3476 # All is well
3477 pass
3478 elif addResponses[ i ] == main.FALSE:
3479 # Already in set, probably fine
3480 pass
3481 elif addResponses[ i ] == main.ERROR:
3482 # Error in execution
3483 addAllResults = main.FALSE
3484 else:
3485 # unexpected result
3486 addAllResults = main.FALSE
3487 if addAllResults != main.TRUE:
3488 main.log.error( "Error executing set addAll" )
3489
3490 # Check if set is still correct
3491 size = len( onosSet )
3492 getResponses = []
3493 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003494 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003495 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003496 name="setTestGet-" + str( i ),
3497 args=[ onosSetName ] )
3498 threads.append( t )
3499 t.start()
3500 for t in threads:
3501 t.join()
3502 getResponses.append( t.result )
3503 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003504 for i in range( len( main.activeNodes ) ):
3505 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003506 if isinstance( getResponses[ i ], list):
3507 current = set( getResponses[ i ] )
3508 if len( current ) == len( getResponses[ i ] ):
3509 # no repeats
3510 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003511 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003512 " has incorrect view" +
3513 " of set " + onosSetName + ":\n" +
3514 str( getResponses[ i ] ) )
3515 main.log.debug( "Expected: " + str( onosSet ) )
3516 main.log.debug( "Actual: " + str( current ) )
3517 getResults = main.FALSE
3518 else:
3519 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003520 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003521 " has repeat elements in" +
3522 " set " + onosSetName + ":\n" +
3523 str( getResponses[ i ] ) )
3524 getResults = main.FALSE
3525 elif getResponses[ i ] == main.ERROR:
3526 getResults = main.FALSE
3527 sizeResponses = []
3528 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003529 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003530 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003531 name="setTestSize-" + str( i ),
3532 args=[ onosSetName ] )
3533 threads.append( t )
3534 t.start()
3535 for t in threads:
3536 t.join()
3537 sizeResponses.append( t.result )
3538 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003539 for i in range( len( main.activeNodes ) ):
3540 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003541 if size != sizeResponses[ i ]:
3542 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003543 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003544 " expected a size of " + str( size ) +
3545 " for set " + onosSetName +
3546 " but got " + str( sizeResponses[ i ] ) )
3547 addAllResults = addAllResults and getResults and sizeResults
3548 utilities.assert_equals( expect=main.TRUE,
3549 actual=addAllResults,
3550 onpass="Set addAll correct",
3551 onfail="Set addAll was incorrect" )
3552
3553 main.step( "Distributed Set contains()" )
3554 containsResponses = []
3555 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003556 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003557 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003558 name="setContains-" + str( i ),
3559 args=[ onosSetName ],
3560 kwargs={ "values": addValue } )
3561 threads.append( t )
3562 t.start()
3563 for t in threads:
3564 t.join()
3565 # NOTE: This is the tuple
3566 containsResponses.append( t.result )
3567
3568 containsResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003569 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003570 if containsResponses[ i ] == main.ERROR:
3571 containsResults = main.FALSE
3572 else:
3573 containsResults = containsResults and\
3574 containsResponses[ i ][ 1 ]
3575 utilities.assert_equals( expect=main.TRUE,
3576 actual=containsResults,
3577 onpass="Set contains is functional",
3578 onfail="Set contains failed" )
3579
3580 main.step( "Distributed Set containsAll()" )
3581 containsAllResponses = []
3582 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003583 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003584 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003585 name="setContainsAll-" + str( i ),
3586 args=[ onosSetName ],
3587 kwargs={ "values": addAllValue } )
3588 threads.append( t )
3589 t.start()
3590 for t in threads:
3591 t.join()
3592 # NOTE: This is the tuple
3593 containsAllResponses.append( t.result )
3594
3595 containsAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003596 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003597            if containsAllResponses[ i ] == main.ERROR:
3598                containsAllResults = main.FALSE
3599            else:
3600                containsAllResults = containsAllResults and\
3601                                     containsAllResponses[ i ][ 1 ]
3602 utilities.assert_equals( expect=main.TRUE,
3603 actual=containsAllResults,
3604 onpass="Set containsAll is functional",
3605 onfail="Set containsAll failed" )
3606
3607 main.step( "Distributed Set remove()" )
3608 onosSet.remove( addValue )
3609 removeResponses = []
3610 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003611 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003612 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003613 name="setTestRemove-" + str( i ),
3614 args=[ onosSetName, addValue ] )
3615 threads.append( t )
3616 t.start()
3617 for t in threads:
3618 t.join()
3619 removeResponses.append( t.result )
3620
3621 # main.TRUE = successfully changed the set
3622 # main.FALSE = action resulted in no change in set
3623 # main.ERROR - Some error in executing the function
3624 removeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003625 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003626 if removeResponses[ i ] == main.TRUE:
3627 # All is well
3628 pass
3629 elif removeResponses[ i ] == main.FALSE:
3630 # not in set, probably fine
3631 pass
3632 elif removeResponses[ i ] == main.ERROR:
3633 # Error in execution
3634 removeResults = main.FALSE
3635 else:
3636 # unexpected result
3637 removeResults = main.FALSE
3638 if removeResults != main.TRUE:
3639 main.log.error( "Error executing set remove" )
3640
3641 # Check if set is still correct
3642 size = len( onosSet )
3643 getResponses = []
3644 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003645 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003646 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003647 name="setTestGet-" + str( i ),
3648 args=[ onosSetName ] )
3649 threads.append( t )
3650 t.start()
3651 for t in threads:
3652 t.join()
3653 getResponses.append( t.result )
3654 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003655 for i in range( len( main.activeNodes ) ):
3656 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003657 if isinstance( getResponses[ i ], list):
3658 current = set( getResponses[ i ] )
3659 if len( current ) == len( getResponses[ i ] ):
3660 # no repeats
3661 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003662 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003663 " has incorrect view" +
3664 " of set " + onosSetName + ":\n" +
3665 str( getResponses[ i ] ) )
3666 main.log.debug( "Expected: " + str( onosSet ) )
3667 main.log.debug( "Actual: " + str( current ) )
3668 getResults = main.FALSE
3669 else:
3670 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003671 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003672 " has repeat elements in" +
3673 " set " + onosSetName + ":\n" +
3674 str( getResponses[ i ] ) )
3675 getResults = main.FALSE
3676 elif getResponses[ i ] == main.ERROR:
3677 getResults = main.FALSE
3678 sizeResponses = []
3679 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003680 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003681 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003682 name="setTestSize-" + str( i ),
3683 args=[ onosSetName ] )
3684 threads.append( t )
3685 t.start()
3686 for t in threads:
3687 t.join()
3688 sizeResponses.append( t.result )
3689 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003690 for i in range( len( main.activeNodes ) ):
3691 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003692 if size != sizeResponses[ i ]:
3693 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003694 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003695 " expected a size of " + str( size ) +
3696 " for set " + onosSetName +
3697 " but got " + str( sizeResponses[ i ] ) )
3698 removeResults = removeResults and getResults and sizeResults
3699 utilities.assert_equals( expect=main.TRUE,
3700 actual=removeResults,
3701 onpass="Set remove correct",
3702 onfail="Set remove was incorrect" )
3703
3704 main.step( "Distributed Set removeAll()" )
3705 onosSet.difference_update( addAllValue.split() )
3706 removeAllResponses = []
3707 threads = []
3708 try:
Jon Halla440e872016-03-31 15:15:50 -07003709 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003710 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003711 name="setTestRemoveAll-" + str( i ),
3712 args=[ onosSetName, addAllValue ] )
3713 threads.append( t )
3714 t.start()
3715 for t in threads:
3716 t.join()
3717 removeAllResponses.append( t.result )
3718 except Exception, e:
3719 main.log.exception(e)
3720
3721 # main.TRUE = successfully changed the set
3722 # main.FALSE = action resulted in no change in set
3723 # main.ERROR - Some error in executing the function
3724 removeAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003725 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003726 if removeAllResponses[ i ] == main.TRUE:
3727 # All is well
3728 pass
3729 elif removeAllResponses[ i ] == main.FALSE:
3730 # not in set, probably fine
3731 pass
3732 elif removeAllResponses[ i ] == main.ERROR:
3733 # Error in execution
3734 removeAllResults = main.FALSE
3735 else:
3736 # unexpected result
3737 removeAllResults = main.FALSE
3738 if removeAllResults != main.TRUE:
3739 main.log.error( "Error executing set removeAll" )
3740
3741 # Check if set is still correct
3742 size = len( onosSet )
3743 getResponses = []
3744 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003745 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003746 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003747 name="setTestGet-" + str( i ),
3748 args=[ onosSetName ] )
3749 threads.append( t )
3750 t.start()
3751 for t in threads:
3752 t.join()
3753 getResponses.append( t.result )
3754 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003755 for i in range( len( main.activeNodes ) ):
3756 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003757 if isinstance( getResponses[ i ], list):
3758 current = set( getResponses[ i ] )
3759 if len( current ) == len( getResponses[ i ] ):
3760 # no repeats
3761 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003762 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003763 " has incorrect view" +
3764 " of set " + onosSetName + ":\n" +
3765 str( getResponses[ i ] ) )
3766 main.log.debug( "Expected: " + str( onosSet ) )
3767 main.log.debug( "Actual: " + str( current ) )
3768 getResults = main.FALSE
3769 else:
3770 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003771 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003772 " has repeat elements in" +
3773 " set " + onosSetName + ":\n" +
3774 str( getResponses[ i ] ) )
3775 getResults = main.FALSE
3776 elif getResponses[ i ] == main.ERROR:
3777 getResults = main.FALSE
3778 sizeResponses = []
3779 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003780 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003781 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003782 name="setTestSize-" + str( i ),
3783 args=[ onosSetName ] )
3784 threads.append( t )
3785 t.start()
3786 for t in threads:
3787 t.join()
3788 sizeResponses.append( t.result )
3789 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003790 for i in range( len( main.activeNodes ) ):
3791 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003792 if size != sizeResponses[ i ]:
3793 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003794 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003795 " expected a size of " + str( size ) +
3796 " for set " + onosSetName +
3797 " but got " + str( sizeResponses[ i ] ) )
3798 removeAllResults = removeAllResults and getResults and sizeResults
3799 utilities.assert_equals( expect=main.TRUE,
3800 actual=removeAllResults,
3801 onpass="Set removeAll correct",
3802 onfail="Set removeAll was incorrect" )
3803
3804 main.step( "Distributed Set addAll()" )
3805 onosSet.update( addAllValue.split() )
3806 addResponses = []
3807 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003808 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003809 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003810 name="setTestAddAll-" + str( i ),
3811 args=[ onosSetName, addAllValue ] )
3812 threads.append( t )
3813 t.start()
3814 for t in threads:
3815 t.join()
3816 addResponses.append( t.result )
3817
3818 # main.TRUE = successfully changed the set
3819 # main.FALSE = action resulted in no change in set
3820 # main.ERROR - Some error in executing the function
3821 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003822 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003823 if addResponses[ i ] == main.TRUE:
3824 # All is well
3825 pass
3826 elif addResponses[ i ] == main.FALSE:
3827 # Already in set, probably fine
3828 pass
3829 elif addResponses[ i ] == main.ERROR:
3830 # Error in execution
3831 addAllResults = main.FALSE
3832 else:
3833 # unexpected result
3834 addAllResults = main.FALSE
3835 if addAllResults != main.TRUE:
3836 main.log.error( "Error executing set addAll" )
3837
3838 # Check if set is still correct
3839 size = len( onosSet )
3840 getResponses = []
3841 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003842 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003843 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003844 name="setTestGet-" + str( i ),
3845 args=[ onosSetName ] )
3846 threads.append( t )
3847 t.start()
3848 for t in threads:
3849 t.join()
3850 getResponses.append( t.result )
3851 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003852 for i in range( len( main.activeNodes ) ):
3853 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003854 if isinstance( getResponses[ i ], list):
3855 current = set( getResponses[ i ] )
3856 if len( current ) == len( getResponses[ i ] ):
3857 # no repeats
3858 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003859 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003860 " has incorrect view" +
3861 " of set " + onosSetName + ":\n" +
3862 str( getResponses[ i ] ) )
3863 main.log.debug( "Expected: " + str( onosSet ) )
3864 main.log.debug( "Actual: " + str( current ) )
3865 getResults = main.FALSE
3866 else:
3867 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003868 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003869 " has repeat elements in" +
3870 " set " + onosSetName + ":\n" +
3871 str( getResponses[ i ] ) )
3872 getResults = main.FALSE
3873 elif getResponses[ i ] == main.ERROR:
3874 getResults = main.FALSE
3875 sizeResponses = []
3876 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003877 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003878 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003879 name="setTestSize-" + str( i ),
3880 args=[ onosSetName ] )
3881 threads.append( t )
3882 t.start()
3883 for t in threads:
3884 t.join()
3885 sizeResponses.append( t.result )
3886 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003887 for i in range( len( main.activeNodes ) ):
3888 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003889 if size != sizeResponses[ i ]:
3890 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003891 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003892 " expected a size of " + str( size ) +
3893 " for set " + onosSetName +
3894 " but got " + str( sizeResponses[ i ] ) )
3895 addAllResults = addAllResults and getResults and sizeResults
3896 utilities.assert_equals( expect=main.TRUE,
3897 actual=addAllResults,
3898 onpass="Set addAll correct",
3899 onfail="Set addAll was incorrect" )
3900
3901 main.step( "Distributed Set clear()" )
3902 onosSet.clear()
3903 clearResponses = []
3904 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003905 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003906 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003907 name="setTestClear-" + str( i ),
3908                             args=[ onosSetName, " " ],  # Values don't matter when clearing
3909 kwargs={ "clear": True } )
3910 threads.append( t )
3911 t.start()
3912 for t in threads:
3913 t.join()
3914 clearResponses.append( t.result )
3915
3916 # main.TRUE = successfully changed the set
3917 # main.FALSE = action resulted in no change in set
3918 # main.ERROR - Some error in executing the function
3919 clearResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003920 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003921 if clearResponses[ i ] == main.TRUE:
3922 # All is well
3923 pass
3924 elif clearResponses[ i ] == main.FALSE:
3925 # Nothing set, probably fine
3926 pass
3927 elif clearResponses[ i ] == main.ERROR:
3928 # Error in execution
3929 clearResults = main.FALSE
3930 else:
3931 # unexpected result
3932 clearResults = main.FALSE
3933 if clearResults != main.TRUE:
3934 main.log.error( "Error executing set clear" )
3935
3936 # Check if set is still correct
3937 size = len( onosSet )
3938 getResponses = []
3939 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003940 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003941 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003942 name="setTestGet-" + str( i ),
3943 args=[ onosSetName ] )
3944 threads.append( t )
3945 t.start()
3946 for t in threads:
3947 t.join()
3948 getResponses.append( t.result )
3949 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003950 for i in range( len( main.activeNodes ) ):
3951 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003952 if isinstance( getResponses[ i ], list):
3953 current = set( getResponses[ i ] )
3954 if len( current ) == len( getResponses[ i ] ):
3955 # no repeats
3956 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003957 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003958 " has incorrect view" +
3959 " of set " + onosSetName + ":\n" +
3960 str( getResponses[ i ] ) )
3961 main.log.debug( "Expected: " + str( onosSet ) )
3962 main.log.debug( "Actual: " + str( current ) )
3963 getResults = main.FALSE
3964 else:
3965 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003966 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003967 " has repeat elements in" +
3968 " set " + onosSetName + ":\n" +
3969 str( getResponses[ i ] ) )
3970 getResults = main.FALSE
3971 elif getResponses[ i ] == main.ERROR:
3972 getResults = main.FALSE
3973 sizeResponses = []
3974 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003975 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003976 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003977 name="setTestSize-" + str( i ),
3978 args=[ onosSetName ] )
3979 threads.append( t )
3980 t.start()
3981 for t in threads:
3982 t.join()
3983 sizeResponses.append( t.result )
3984 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003985 for i in range( len( main.activeNodes ) ):
3986 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003987 if size != sizeResponses[ i ]:
3988 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003989 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003990 " expected a size of " + str( size ) +
3991 " for set " + onosSetName +
3992 " but got " + str( sizeResponses[ i ] ) )
3993 clearResults = clearResults and getResults and sizeResults
3994 utilities.assert_equals( expect=main.TRUE,
3995 actual=clearResults,
3996 onpass="Set clear correct",
3997 onfail="Set clear was incorrect" )
3998
3999 main.step( "Distributed Set addAll()" )
4000 onosSet.update( addAllValue.split() )
4001 addResponses = []
4002 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004003 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004004 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004005 name="setTestAddAll-" + str( i ),
4006 args=[ onosSetName, addAllValue ] )
4007 threads.append( t )
4008 t.start()
4009 for t in threads:
4010 t.join()
4011 addResponses.append( t.result )
4012
4013 # main.TRUE = successfully changed the set
4014 # main.FALSE = action resulted in no change in set
4015 # main.ERROR - Some error in executing the function
4016 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004017 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004018 if addResponses[ i ] == main.TRUE:
4019 # All is well
4020 pass
4021 elif addResponses[ i ] == main.FALSE:
4022 # Already in set, probably fine
4023 pass
4024 elif addResponses[ i ] == main.ERROR:
4025 # Error in execution
4026 addAllResults = main.FALSE
4027 else:
4028 # unexpected result
4029 addAllResults = main.FALSE
4030 if addAllResults != main.TRUE:
4031 main.log.error( "Error executing set addAll" )
4032
4033 # Check if set is still correct
4034 size = len( onosSet )
4035 getResponses = []
4036 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004037 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004038 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004039 name="setTestGet-" + str( i ),
4040 args=[ onosSetName ] )
4041 threads.append( t )
4042 t.start()
4043 for t in threads:
4044 t.join()
4045 getResponses.append( t.result )
4046 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004047 for i in range( len( main.activeNodes ) ):
4048 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004049 if isinstance( getResponses[ i ], list):
4050 current = set( getResponses[ i ] )
4051 if len( current ) == len( getResponses[ i ] ):
4052 # no repeats
4053 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07004054 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004055 " has incorrect view" +
4056 " of set " + onosSetName + ":\n" +
4057 str( getResponses[ i ] ) )
4058 main.log.debug( "Expected: " + str( onosSet ) )
4059 main.log.debug( "Actual: " + str( current ) )
4060 getResults = main.FALSE
4061 else:
4062 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07004063 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004064 " has repeat elements in" +
4065 " set " + onosSetName + ":\n" +
4066 str( getResponses[ i ] ) )
4067 getResults = main.FALSE
4068 elif getResponses[ i ] == main.ERROR:
4069 getResults = main.FALSE
4070 sizeResponses = []
4071 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004072 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004073 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004074 name="setTestSize-" + str( i ),
4075 args=[ onosSetName ] )
4076 threads.append( t )
4077 t.start()
4078 for t in threads:
4079 t.join()
4080 sizeResponses.append( t.result )
4081 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004082 for i in range( len( main.activeNodes ) ):
4083 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004084 if size != sizeResponses[ i ]:
4085 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07004086 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004087 " expected a size of " + str( size ) +
4088 " for set " + onosSetName +
4089 " but got " + str( sizeResponses[ i ] ) )
4090 addAllResults = addAllResults and getResults and sizeResults
4091 utilities.assert_equals( expect=main.TRUE,
4092 actual=addAllResults,
4093 onpass="Set addAll correct",
4094 onfail="Set addAll was incorrect" )
4095
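        # retain() keeps only the elements that also appear in retainValue,
        # i.e. Java's Set.retainAll() / Python's set.intersection_update(),
        # e.g. ( illustrative values only ):
        #     ref = set( "a b c d".split() )
        #     ref.intersection_update( "b c".split() )  # ref == set( [ "b", "c" ] )
        # The CLI call below reuses setTestRemove with retain=True to do this.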
4096 main.step( "Distributed Set retain()" )
4097 onosSet.intersection_update( retainValue.split() )
4098 retainResponses = []
4099 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004100 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004101 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004102 name="setTestRetain-" + str( i ),
4103 args=[ onosSetName, retainValue ],
4104 kwargs={ "retain": True } )
4105 threads.append( t )
4106 t.start()
4107 for t in threads:
4108 t.join()
4109 retainResponses.append( t.result )
4110
4111 # main.TRUE = successfully changed the set
4112 # main.FALSE = action resulted in no change in set
4113        # main.ERROR = error while executing the function
4114 retainResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004115 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004116 if retainResponses[ i ] == main.TRUE:
4117 # All is well
4118 pass
4119 elif retainResponses[ i ] == main.FALSE:
4120                # No change made; set already matched the retained values, probably fine
4121 pass
4122 elif retainResponses[ i ] == main.ERROR:
4123 # Error in execution
4124 retainResults = main.FALSE
4125 else:
4126 # unexpected result
4127 retainResults = main.FALSE
4128 if retainResults != main.TRUE:
4129 main.log.error( "Error executing set retain" )
4130
4131 # Check if set is still correct
4132 size = len( onosSet )
4133 getResponses = []
4134 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004135 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004136 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004137 name="setTestGet-" + str( i ),
4138 args=[ onosSetName ] )
4139 threads.append( t )
4140 t.start()
4141 for t in threads:
4142 t.join()
4143 getResponses.append( t.result )
4144 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004145 for i in range( len( main.activeNodes ) ):
4146 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004147 if isinstance( getResponses[ i ], list):
4148 current = set( getResponses[ i ] )
4149 if len( current ) == len( getResponses[ i ] ):
4150 # no repeats
4151 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07004152 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004153 " has incorrect view" +
4154 " of set " + onosSetName + ":\n" +
4155 str( getResponses[ i ] ) )
4156 main.log.debug( "Expected: " + str( onosSet ) )
4157 main.log.debug( "Actual: " + str( current ) )
4158 getResults = main.FALSE
4159 else:
4160 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07004161 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004162 " has repeat elements in" +
4163 " set " + onosSetName + ":\n" +
4164 str( getResponses[ i ] ) )
4165 getResults = main.FALSE
4166 elif getResponses[ i ] == main.ERROR:
4167 getResults = main.FALSE
4168 sizeResponses = []
4169 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004170 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004171 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004172 name="setTestSize-" + str( i ),
4173 args=[ onosSetName ] )
4174 threads.append( t )
4175 t.start()
4176 for t in threads:
4177 t.join()
4178 sizeResponses.append( t.result )
4179 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004180 for i in range( len( main.activeNodes ) ):
4181 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004182 if size != sizeResponses[ i ]:
4183 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07004184 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004185 str( size ) + " for set " + onosSetName +
4186 " but got " + str( sizeResponses[ i ] ) )
4187 retainResults = retainResults and getResults and sizeResults
4188 utilities.assert_equals( expect=main.TRUE,
4189 actual=retainResults,
4190 onpass="Set retain correct",
4191 onfail="Set retain was incorrect" )
4192
Jon Hall2a5002c2015-08-21 16:49:11 -07004193 # Transactional maps
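        # The partitioned transactional map is written through a single active
        # node: transactionalMapPut() stores numKeys entries, all mapped to
        # tMapValue, and each reply is checked for that value. The get step
        # below expects the keys to be named "Key1" .. "Key" + str( numKeys )
        # and reads them back through every active node.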
4194 main.step( "Partitioned Transactional maps put" )
4195 tMapValue = "Testing"
4196 numKeys = 100
4197 putResult = True
Jon Halla440e872016-03-31 15:15:50 -07004198 node = main.activeNodes[0]
4199 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4200        if putResponses and len( putResponses ) == numKeys:
Jon Hall2a5002c2015-08-21 16:49:11 -07004201 for i in putResponses:
4202 if putResponses[ i ][ 'value' ] != tMapValue:
4203 putResult = False
4204 else:
4205 putResult = False
4206 if not putResult:
4207 main.log.debug( "Put response values: " + str( putResponses ) )
4208 utilities.assert_equals( expect=True,
4209 actual=putResult,
4210 onpass="Partitioned Transactional Map put successful",
4211 onfail="Partitioned Transactional Map put values are incorrect" )
4212
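        # Every key is read back from every active node in parallel; the check
        # only passes if all nodes return the value that was written, e.g.
        # ( illustrative ) getResponses == [ tMapValue ] * len( main.activeNodes )
        # for each key when the map is consistent across the cluster.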
4213 main.step( "Partitioned Transactional maps get" )
4214 getCheck = True
4215 for n in range( 1, numKeys + 1 ):
4216 getResponses = []
4217 threads = []
4218 valueCheck = True
Jon Halla440e872016-03-31 15:15:50 -07004219 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004220 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4221 name="TMap-get-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07004222 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004223 threads.append( t )
4224 t.start()
4225 for t in threads:
4226 t.join()
4227 getResponses.append( t.result )
4228            for response in getResponses:
4229                if response != tMapValue:
4230 valueCheck = False
4231 if not valueCheck:
4232 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4233 main.log.warn( getResponses )
4234 getCheck = getCheck and valueCheck
4235 utilities.assert_equals( expect=True,
4236 actual=getCheck,
4237 onpass="Partitioned Transactional Map get values were correct",
4238                                 onfail="Partitioned Transactional Map get values were incorrect" )