"""
Description: This test is to determine if the HA test setup is
             working correctly. There are no failures so this test should
             have a 100% pass rate

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The Failure case. Since this is the Sanity test, we do nothing.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAsanity:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import time
        import json
        main.log.info( "ONOS HA Sanity test - initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # TODO: refactor how to get onos port, maybe put into component tag?
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

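        # Gather the per-node CLI and ONOS component handles that TestON
        # builds from the .topo file, along with each node's IP address.
        # The loop stops at the first index with no matching component.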
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, can only be used if multiple plots
        #       index = The number of the graph under plot name
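        # The block below builds a Confluence 'html' structured macro that
        # embeds the Jenkins plot as an iframe and writes it to the wiki
        # log, so the results page can show the trend graph for this job.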
        job = "HAsanity"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.log.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
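        # utilities.retry re-invokes main.HA.nodesCheck on the active nodes,
        # treating a False return as a failed attempt, for up to 5 attempts.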
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
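        # Run appToIDCheck on every active node in parallel and AND the
        # results together; it cross-checks each node's 'apps' and 'app-ids'
        # views for consistency.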
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

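        # Verify the assignment: read each switch's controller list back
        # from Mininet/OVS and confirm every ONOS node's IP appears in it.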
        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
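                # The target controller index is taken modulo main.numCtrls,
                # so the 7-node layout below degrades gracefully on smaller
                # clusters; deviceId is looked up from ONOS by DPID fragment.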
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
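        # Pair hosts h8-h17 with h18-h27: build their MAC-based host IDs and
        # add one host-to-host intent per pair, spreading the addHostIntent
        # calls round-robin across the active ONOS nodes.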
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
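        # Dump leadership, partition, and pending-map state from ONOS to
        # help debug any missing or stuck intents.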
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
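        # Poll every active node (up to 100 times, one second apart) until
        # the full set of submitted intent IDs is present and INSTALLED on
        # each of them, then compare the elapsed time against the expected
        # anti-entropy window ( gossip period * number of active nodes ).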
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
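        # Poll the intents for up to 40 seconds (one second per attempt)
        # until every intent reports an INSTALLED state.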
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        node = main.activeNodes[0]
        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...         ...        ...
            # ...         ...        ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]
1404
1405 main.step( "Get the flows from each controller" )
1406 global flowState
1407 flowState = []
1408 ONOSFlows = []
1409 ONOSFlowsJson = []
1410 flowCheck = main.FALSE
1411 consistentFlows = True
1412 flowsResults = True
1413 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001414 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001415 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001416 name="flows-" + str( i ),
1417 args=[],
1418 kwargs={ 'jsonFormat': True } )
1419 threads.append( t )
1420 t.start()
1421
1422 # NOTE: Flows command can take some time to run
1423 time.sleep(30)
1424 for t in threads:
1425 t.join()
1426 result = t.result
1427 ONOSFlows.append( result )
1428
Jon Halla440e872016-03-31 15:15:50 -07001429 for i in range( len( ONOSFlows ) ):
1430 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001431 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1432 main.log.error( "Error in getting ONOS" + num + " flows" )
1433 main.log.warn( "ONOS" + num + " flows response: " +
1434 repr( ONOSFlows[ i ] ) )
1435 flowsResults = False
1436 ONOSFlowsJson.append( None )
1437 else:
1438 try:
1439 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1440 except ( ValueError, TypeError ):
1441 # FIXME: change this to log.error?
1442 main.log.exception( "Error in parsing ONOS" + num +
1443 " response as json." )
1444 main.log.error( repr( ONOSFlows[ i ] ) )
1445 ONOSFlowsJson.append( None )
1446 flowsResults = False
1447 utilities.assert_equals(
1448 expect=True,
1449 actual=flowsResults,
1450 onpass="No error in reading flows output",
1451 onfail="Error in reading flows from ONOS" )
1452
1453 main.step( "Check for consistency in Flows from each controller" )
1454 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1455 if all( tmp ):
1456 main.log.info( "Flow count is consistent across all ONOS nodes" )
1457 else:
1458 consistentFlows = False
1459 utilities.assert_equals(
1460 expect=True,
1461 actual=consistentFlows,
1462 onpass="The flow count is consistent across all ONOS nodes",
1463 onfail="ONOS nodes have different flow counts" )
1464
1465 if flowsResults and not consistentFlows:
Jon Halla440e872016-03-31 15:15:50 -07001466 for i in range( len( ONOSFlows ) ):
1467 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001468 try:
1469 main.log.warn(
Jon Halla440e872016-03-31 15:15:50 -07001470 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001471 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1472 indent=4, separators=( ',', ': ' ) ) )
1473 except ( ValueError, TypeError ):
Jon Halla440e872016-03-31 15:15:50 -07001474 main.log.warn( "ONOS" + node + " flows: " +
1475 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001476 elif flowsResults and consistentFlows:
1477 flowCheck = main.TRUE
1478 flowState = ONOSFlows[ 0 ]
1479
1480 main.step( "Get the OF Table entries" )
1481 global flows
1482 flows = []
1483 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001484 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001485 if flowCheck == main.FALSE:
1486 for table in flows:
1487 main.log.warn( table )
1488 # TODO: Compare switch flow tables with ONOS flow tables
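        # NOTE: a minimal sketch, not part of the original test, of the comparison
        #       the TODO above asks for. It assumes each ONOS flow object exposes a
        #       'deviceId' field and that counting "cookie" lines in the ovs-ofctl
        #       dump is a fair per-switch flow count; the helper only logs counts
        #       and is never called by the test.
        def sketchCompareFlowCounts( onosFlows, switchTables ):
            """Log ONOS vs. switch flow counts so they can be compared by eye."""
            counts = {}
            for flow in onosFlows or []:
                dev = flow.get( 'deviceId' )
                counts[ dev ] = counts.get( dev, 0 ) + 1
            for index, table in enumerate( switchTables ):
                switchCount = len( [ line for line in str( table ).splitlines()
                                     if "cookie" in line ] )
                main.log.debug( "s" + str( index + 1 ) + ": " + str( switchCount ) +
                                " flows in the OF table" )
            main.log.debug( "ONOS flow counts per device: " + repr( counts ) )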
1489
1490 main.step( "Start continuous pings" )
1491 main.Mininet2.pingLong(
1492 src=main.params[ 'PING' ][ 'source1' ],
1493 target=main.params[ 'PING' ][ 'target1' ],
1494 pingTime=500 )
1495 main.Mininet2.pingLong(
1496 src=main.params[ 'PING' ][ 'source2' ],
1497 target=main.params[ 'PING' ][ 'target2' ],
1498 pingTime=500 )
1499 main.Mininet2.pingLong(
1500 src=main.params[ 'PING' ][ 'source3' ],
1501 target=main.params[ 'PING' ][ 'target3' ],
1502 pingTime=500 )
1503 main.Mininet2.pingLong(
1504 src=main.params[ 'PING' ][ 'source4' ],
1505 target=main.params[ 'PING' ][ 'target4' ],
1506 pingTime=500 )
1507 main.Mininet2.pingLong(
1508 src=main.params[ 'PING' ][ 'source5' ],
1509 target=main.params[ 'PING' ][ 'target5' ],
1510 pingTime=500 )
1511 main.Mininet2.pingLong(
1512 src=main.params[ 'PING' ][ 'source6' ],
1513 target=main.params[ 'PING' ][ 'target6' ],
1514 pingTime=500 )
1515 main.Mininet2.pingLong(
1516 src=main.params[ 'PING' ][ 'source7' ],
1517 target=main.params[ 'PING' ][ 'target7' ],
1518 pingTime=500 )
1519 main.Mininet2.pingLong(
1520 src=main.params[ 'PING' ][ 'source8' ],
1521 target=main.params[ 'PING' ][ 'target8' ],
1522 pingTime=500 )
1523 main.Mininet2.pingLong(
1524 src=main.params[ 'PING' ][ 'source9' ],
1525 target=main.params[ 'PING' ][ 'target9' ],
1526 pingTime=500 )
1527 main.Mininet2.pingLong(
1528 src=main.params[ 'PING' ][ 'source10' ],
1529 target=main.params[ 'PING' ][ 'target10' ],
1530 pingTime=500 )
1531
1532 main.step( "Collecting topology information from ONOS" )
1533 devices = []
1534 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001535 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001536 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001537 name="devices-" + str( i ),
1538 args=[ ] )
1539 threads.append( t )
1540 t.start()
1541
1542 for t in threads:
1543 t.join()
1544 devices.append( t.result )
1545 hosts = []
1546 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001547 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001548 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001549 name="hosts-" + str( i ),
1550 args=[ ] )
1551 threads.append( t )
1552 t.start()
1553
1554 for t in threads:
1555 t.join()
1556 try:
1557 hosts.append( json.loads( t.result ) )
1558 except ( ValueError, TypeError ):
1559 # FIXME: better handling of this, print which node
1560 # Maybe use thread name?
1561 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001562 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001563 hosts.append( None )
1564
1565 ports = []
1566 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001567 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001568 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001569 name="ports-" + str( i ),
1570 args=[ ] )
1571 threads.append( t )
1572 t.start()
1573
1574 for t in threads:
1575 t.join()
1576 ports.append( t.result )
1577 links = []
1578 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001579 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001580 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001581 name="links-" + str( i ),
1582 args=[ ] )
1583 threads.append( t )
1584 t.start()
1585
1586 for t in threads:
1587 t.join()
1588 links.append( t.result )
1589 clusters = []
1590 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001591 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001592 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001593 name="clusters-" + str( i ),
1594 args=[ ] )
1595 threads.append( t )
1596 t.start()
1597
1598 for t in threads:
1599 t.join()
1600 clusters.append( t.result )
1601 # Compare json objects for hosts and dataplane clusters
1602
1603 # hosts
1604 main.step( "Host view is consistent across ONOS nodes" )
1605 consistentHostsResult = main.TRUE
1606 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001607 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001608 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001609 if hosts[ controller ] == hosts[ 0 ]:
1610 continue
1611 else: # hosts not consistent
1612 main.log.error( "hosts from ONOS" +
1613 controllerStr +
1614 " is inconsistent with ONOS1" )
1615 main.log.warn( repr( hosts[ controller ] ) )
1616 consistentHostsResult = main.FALSE
1617
1618 else:
1619 main.log.error( "Error in getting ONOS hosts from ONOS" +
1620 controllerStr )
1621 consistentHostsResult = main.FALSE
1622 main.log.warn( "ONOS" + controllerStr +
1623 " hosts response: " +
1624 repr( hosts[ controller ] ) )
1625 utilities.assert_equals(
1626 expect=main.TRUE,
1627 actual=consistentHostsResult,
1628 onpass="Hosts view is consistent across all ONOS nodes",
1629 onfail="ONOS nodes have different views of hosts" )
1630
1631 main.step( "Each host has an IP address" )
1632 ipResult = main.TRUE
1633 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07001634 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001635 if hosts[ controller ]:
1636 for host in hosts[ controller ]:
1637 if not host.get( 'ipAddresses', [ ] ):
1638 main.log.error( "Error with host ips on controller" +
1639 controllerStr + ": " + str( host ) )
1640 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001641 utilities.assert_equals(
1642 expect=main.TRUE,
1643 actual=ipResult,
1644 onpass="The ips of the hosts aren't empty",
1645 onfail="The ip of at least one host is missing" )
1646
1647 # Strongly connected clusters of devices
1648 main.step( "Cluster view is consistent across ONOS nodes" )
1649 consistentClustersResult = main.TRUE
1650 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07001651 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001652 if "Error" not in clusters[ controller ]:
1653 if clusters[ controller ] == clusters[ 0 ]:
1654 continue
1655 else: # clusters not consistent
1656 main.log.error( "clusters from ONOS" + controllerStr +
1657 " is inconsistent with ONOS1" )
1658 consistentClustersResult = main.FALSE
1659
1660 else:
1661 main.log.error( "Error in getting dataplane clusters " +
1662 "from ONOS" + controllerStr )
1663 consistentClustersResult = main.FALSE
1664 main.log.warn( "ONOS" + controllerStr +
1665 " clusters response: " +
1666 repr( clusters[ controller ] ) )
1667 utilities.assert_equals(
1668 expect=main.TRUE,
1669 actual=consistentClustersResult,
1670 onpass="Clusters view is consistent across all ONOS nodes",
1671 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001672 if consistentClustersResult != main.TRUE:
1673 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07001674 # there should always only be one cluster
1675 main.step( "Cluster view correct across ONOS nodes" )
1676 try:
1677 numClusters = len( json.loads( clusters[ 0 ] ) )
1678 except ( ValueError, TypeError ):
1679 main.log.exception( "Error parsing clusters[0]: " +
1680 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001681 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001682 clusterResults = main.FALSE
1683 if numClusters == 1:
1684 clusterResults = main.TRUE
1685 utilities.assert_equals(
1686 expect=1,
1687 actual=numClusters,
1688 onpass="ONOS shows 1 SCC",
1689 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1690
1691 main.step( "Comparing ONOS topology to MN" )
1692 devicesResults = main.TRUE
1693 linksResults = main.TRUE
1694 hostsResults = main.TRUE
1695 mnSwitches = main.Mininet1.getSwitches()
1696 mnLinks = main.Mininet1.getLinks()
1697 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07001698 for controller in range( len( main.activeNodes ) ):
1699 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001700 if devices[ controller ] and ports[ controller ] and\
1701 "Error" not in devices[ controller ] and\
1702 "Error" not in ports[ controller ]:
Jon Halla440e872016-03-31 15:15:50 -07001703 currentDevicesResult = main.Mininet1.compareSwitches(
1704 mnSwitches,
1705 json.loads( devices[ controller ] ),
1706 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001707 else:
1708 currentDevicesResult = main.FALSE
1709 utilities.assert_equals( expect=main.TRUE,
1710 actual=currentDevicesResult,
1711 onpass="ONOS" + controllerStr +
1712 " Switches view is correct",
1713 onfail="ONOS" + controllerStr +
1714 " Switches view is incorrect" )
1715 if links[ controller ] and "Error" not in links[ controller ]:
1716 currentLinksResult = main.Mininet1.compareLinks(
1717 mnSwitches, mnLinks,
1718 json.loads( links[ controller ] ) )
1719 else:
1720 currentLinksResult = main.FALSE
1721 utilities.assert_equals( expect=main.TRUE,
1722 actual=currentLinksResult,
1723 onpass="ONOS" + controllerStr +
1724 " links view is correct",
1725 onfail="ONOS" + controllerStr +
1726 " links view is incorrect" )
1727
Jon Hall657cdf62015-12-17 14:40:51 -08001728 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001729 currentHostsResult = main.Mininet1.compareHosts(
1730 mnHosts,
1731 hosts[ controller ] )
1732 else:
1733 currentHostsResult = main.FALSE
1734 utilities.assert_equals( expect=main.TRUE,
1735 actual=currentHostsResult,
1736 onpass="ONOS" + controllerStr +
1737 " hosts exist in Mininet",
1738 onfail="ONOS" + controllerStr +
1739 " hosts don't match Mininet" )
1740
1741 devicesResults = devicesResults and currentDevicesResult
1742 linksResults = linksResults and currentLinksResult
1743 hostsResults = hostsResults and currentHostsResult
1744
1745 main.step( "Device information is correct" )
1746 utilities.assert_equals(
1747 expect=main.TRUE,
1748 actual=devicesResults,
1749 onpass="Device information is correct",
1750 onfail="Device information is incorrect" )
1751
1752 main.step( "Links are correct" )
1753 utilities.assert_equals(
1754 expect=main.TRUE,
1755 actual=linksResults,
1756 onpass="Link are correct",
1757 onfail="Links are incorrect" )
1758
1759 main.step( "Hosts are correct" )
1760 utilities.assert_equals(
1761 expect=main.TRUE,
1762 actual=hostsResults,
1763 onpass="Hosts are correct",
1764 onfail="Hosts are incorrect" )
1765
1766 def CASE6( self, main ):
1767 """
1768 The Failure case. Since this is the Sanity test, we do nothing.
1769 """
1770 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001771 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001772 assert main, "main not defined"
1773 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001774 assert main.CLIs, "main.CLIs not defined"
1775 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001776 main.case( "Wait 60 seconds instead of inducing a failure" )
1777 time.sleep( 60 )
1778 utilities.assert_equals(
1779 expect=main.TRUE,
1780 actual=main.TRUE,
1781 onpass="Sleeping 60 seconds",
1782 onfail="Something is terribly wrong with my math" )
1783
1784 def CASE7( self, main ):
1785 """
1786 Check state after ONOS failure
1787 """
1788 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001789 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001790 assert main, "main not defined"
1791 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001792 assert main.CLIs, "main.CLIs not defined"
1793 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001794 main.case( "Running ONOS Constant State Tests" )
1795
1796 main.step( "Check that each switch has a master" )
1797 # Assert that each device has a master
1798 rolesNotNull = main.TRUE
1799 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001800 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001801 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001802 name="rolesNotNull-" + str( i ),
1803 args=[ ] )
1804 threads.append( t )
1805 t.start()
1806
1807 for t in threads:
1808 t.join()
1809 rolesNotNull = rolesNotNull and t.result
1810 utilities.assert_equals(
1811 expect=main.TRUE,
1812 actual=rolesNotNull,
1813 onpass="Each device has a master",
1814 onfail="Some devices don't have a master assigned" )
1815
1816 main.step( "Read device roles from ONOS" )
1817 ONOSMastership = []
1818 mastershipCheck = main.FALSE
1819 consistentMastership = True
1820 rolesResults = True
1821 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001822 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001823 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001824 name="roles-" + str( i ),
1825 args=[] )
1826 threads.append( t )
1827 t.start()
1828
1829 for t in threads:
1830 t.join()
1831 ONOSMastership.append( t.result )
1832
Jon Halla440e872016-03-31 15:15:50 -07001833 for i in range( len( ONOSMastership ) ):
1834 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001835 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Halla440e872016-03-31 15:15:50 -07001836 main.log.error( "Error in getting ONOS" + node + " roles" )
1837 main.log.warn( "ONOS" + node + " mastership response: " +
1838 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001839 rolesResults = False
1840 utilities.assert_equals(
1841 expect=True,
1842 actual=rolesResults,
1843 onpass="No error in reading roles output",
1844 onfail="Error in reading roles from ONOS" )
1845
1846 main.step( "Check for consistency in roles from each controller" )
1847 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1848 main.log.info(
1849 "Switch roles are consistent across all ONOS nodes" )
1850 else:
1851 consistentMastership = False
1852 utilities.assert_equals(
1853 expect=True,
1854 actual=consistentMastership,
1855 onpass="Switch roles are consistent across all ONOS nodes",
1856 onfail="ONOS nodes have different views of switch roles" )
1857
1858 if rolesResults and not consistentMastership:
Jon Halla440e872016-03-31 15:15:50 -07001859 for i in range( len( ONOSMastership ) ):
1860 node = str( main.activeNodes[i] + 1 )
1861 main.log.warn( "ONOS" + node + " roles: " +
1862 json.dumps( json.loads( ONOSMastership[ i ] ),
1863 sort_keys=True,
1864 indent=4,
1865 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001866
1867 description2 = "Compare switch roles from before failure"
1868 main.step( description2 )
1869 try:
1870 currentJson = json.loads( ONOSMastership[0] )
1871 oldJson = json.loads( mastershipState )
1872 except ( ValueError, TypeError ):
1873 main.log.exception( "Something is wrong with parsing " +
1874 "ONOSMastership[0] or mastershipState" )
1875 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1876 main.log.error( "mastershipState" + repr( mastershipState ) )
1877 main.cleanup()
1878 main.exit()
1879 mastershipCheck = main.TRUE
1880 for i in range( 1, 29 ):
1881 switchDPID = str(
1882 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1883 current = [ switch[ 'master' ] for switch in currentJson
1884 if switchDPID in switch[ 'id' ] ]
1885 old = [ switch[ 'master' ] for switch in oldJson
1886 if switchDPID in switch[ 'id' ] ]
1887 if current == old:
1888 mastershipCheck = mastershipCheck and main.TRUE
1889 else:
1890 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1891 mastershipCheck = main.FALSE
1892 utilities.assert_equals(
1893 expect=main.TRUE,
1894 actual=mastershipCheck,
1895 onpass="Mastership of Switches was not changed",
1896 onfail="Mastership of some switches changed" )
1897 mastershipCheck = mastershipCheck and consistentMastership
1898
1899 main.step( "Get the intents and compare across all nodes" )
1900 ONOSIntents = []
1901 intentCheck = main.FALSE
1902 consistentIntents = True
1903 intentsResults = True
1904 threads = []
Jon Halla440e872016-03-31 15:15:50 -07001905 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001906 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001907 name="intents-" + str( i ),
1908 args=[],
1909 kwargs={ 'jsonFormat': True } )
1910 threads.append( t )
1911 t.start()
1912
1913 for t in threads:
1914 t.join()
1915 ONOSIntents.append( t.result )
1916
Jon Halla440e872016-03-31 15:15:50 -07001917 for i in range( len( ONOSIntents) ):
1918 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001919 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Halla440e872016-03-31 15:15:50 -07001920 main.log.error( "Error in getting ONOS" + node + " intents" )
1921 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001922 repr( ONOSIntents[ i ] ) )
1923 intentsResults = False
1924 utilities.assert_equals(
1925 expect=True,
1926 actual=intentsResults,
1927 onpass="No error in reading intents output",
1928 onfail="Error in reading intents from ONOS" )
1929
1930 main.step( "Check for consistency in Intents from each controller" )
1931 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1932 main.log.info( "Intents are consistent across all ONOS " +
1933 "nodes" )
1934 else:
1935 consistentIntents = False
1936
1937 # Try to make it easy to figure out what is happening
1938 #
1939 # Intent ONOS1 ONOS2 ...
1940 # 0x01 INSTALLED INSTALLING
1941 # ... ... ...
1942 # ... ... ...
1943 title = " ID"
Jon Halla440e872016-03-31 15:15:50 -07001944 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001945 title += " " * 10 + "ONOS" + str( n + 1 )
1946 main.log.warn( title )
1947 # get all intent keys in the cluster
1948 keys = []
1949 for nodeStr in ONOSIntents:
1950 node = json.loads( nodeStr )
1951 for intent in node:
1952 keys.append( intent.get( 'id' ) )
1953 keys = set( keys )
1954 for key in keys:
1955 row = "%-13s" % key
1956 for nodeStr in ONOSIntents:
1957 node = json.loads( nodeStr )
1958 for intent in node:
1959 if intent.get( 'id' ) == key:
1960 row += "%-15s" % intent.get( 'state' )
1961 main.log.warn( row )
1962 # End table view
1963
1964 utilities.assert_equals(
1965 expect=True,
1966 actual=consistentIntents,
1967 onpass="Intents are consistent across all ONOS nodes",
1968 onfail="ONOS nodes have different views of intents" )
1969 intentStates = []
1970 for node in ONOSIntents: # Iter through ONOS nodes
1971 nodeStates = []
1972 # Iter through intents of a node
1973 try:
1974 for intent in json.loads( node ):
1975 nodeStates.append( intent[ 'state' ] )
1976 except ( ValueError, TypeError ):
1977 main.log.exception( "Error in parsing intents" )
1978 main.log.error( repr( node ) )
1979 intentStates.append( nodeStates )
1980 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1981 main.log.info( dict( out ) )
1982
1983 if intentsResults and not consistentIntents:
Jon Halla440e872016-03-31 15:15:50 -07001984 for i in range( len( main.activeNodes ) ):
1985 node = str( main.activeNodes[i] + 1 )
1986 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001987 main.log.warn( json.dumps(
1988 json.loads( ONOSIntents[ i ] ),
1989 sort_keys=True,
1990 indent=4,
1991 separators=( ',', ': ' ) ) )
1992 elif intentsResults and consistentIntents:
1993 intentCheck = main.TRUE
1994
1995 # NOTE: Store has no durability, so intents are lost across system
1996 # restarts
1997 main.step( "Compare current intents with intents before the failure" )
1998 # NOTE: this requires case 5 to pass for intentState to be set.
1999 # maybe we should stop the test if that fails?
2000 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002001 try:
2002 intentState
2003 except NameError:
2004 main.log.warn( "No previous intent state was saved" )
2005 else:
2006 if intentState and intentState == ONOSIntents[ 0 ]:
2007 sameIntents = main.TRUE
2008 main.log.info( "Intents are consistent with before failure" )
2009 # TODO: possibly the states have changed? we may need to figure out
2010 # what the acceptable states are
2011 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2012 sameIntents = main.TRUE
2013 try:
2014 before = json.loads( intentState )
2015 after = json.loads( ONOSIntents[ 0 ] )
2016 for intent in before:
2017 if intent not in after:
2018 sameIntents = main.FALSE
2019 main.log.debug( "Intent is not currently in ONOS " +
2020 "(at least in the same form):" )
2021 main.log.debug( json.dumps( intent ) )
2022 except ( ValueError, TypeError ):
2023 main.log.exception( "Exception printing intents" )
2024 main.log.debug( repr( ONOSIntents[0] ) )
2025 main.log.debug( repr( intentState ) )
2026 if sameIntents == main.FALSE:
2027 try:
2028 main.log.debug( "ONOS intents before: " )
2029 main.log.debug( json.dumps( json.loads( intentState ),
2030 sort_keys=True, indent=4,
2031 separators=( ',', ': ' ) ) )
2032 main.log.debug( "Current ONOS intents: " )
2033 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2034 sort_keys=True, indent=4,
2035 separators=( ',', ': ' ) ) )
2036 except ( ValueError, TypeError ):
2037 main.log.exception( "Exception printing intents" )
2038 main.log.debug( repr( ONOSIntents[0] ) )
2039 main.log.debug( repr( intentState ) )
2040 utilities.assert_equals(
2041 expect=main.TRUE,
2042 actual=sameIntents,
2043 onpass="Intents are consistent with before failure",
2044 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002045 intentCheck = intentCheck and sameIntents
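        # NOTE: illustrative sketch only, prompted by the TODO above about intent
        #       states possibly changing: compare the intent sets by id while
        #       ignoring the volatile 'state' field. The 'id' field is the same
        #       one read elsewhere in this case; the helper is never called.
        def sketchSameIntentsIgnoringState( beforeJson, afterJson ):
            """Return True if the same intent ids exist before and after."""
            try:
                beforeIds = set( i.get( 'id' ) for i in json.loads( beforeJson ) )
                afterIds = set( i.get( 'id' ) for i in json.loads( afterJson ) )
                return beforeIds == afterIds
            except ( ValueError, TypeError ):
                main.log.exception( "Could not parse intents for the sketch" )
                return False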
2046
2047 main.step( "Get the OF Table entries and compare to before " +
2048 "component failure" )
2049 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002050 for i in range( 28 ):
2051 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002052 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002053 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2054 FlowTables = FlowTables and curSwitch
2055 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002056 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002057 utilities.assert_equals(
2058 expect=main.TRUE,
2059 actual=FlowTables,
2060 onpass="No changes were found in the flow tables",
2061 onfail="Changes were found in the flow tables" )
2062
2063 main.Mininet2.pingLongKill()
2064 '''
2065 main.step( "Check the continuous pings to ensure that no packets " +
2066 "were dropped during component failure" )
2067 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2068 main.params[ 'TESTONIP' ] )
2069 LossInPings = main.FALSE
2070 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2071 for i in range( 8, 18 ):
2072 main.log.info(
2073 "Checking for a loss in pings along flow from s" +
2074 str( i ) )
2075 LossInPings = main.Mininet2.checkForLoss(
2076 "/tmp/ping.h" +
2077 str( i ) ) or LossInPings
2078 if LossInPings == main.TRUE:
2079 main.log.info( "Loss in ping detected" )
2080 elif LossInPings == main.ERROR:
2081 main.log.info( "There are multiple mininet process running" )
2082 elif LossInPings == main.FALSE:
2083 main.log.info( "No Loss in the pings" )
2084 main.log.info( "No loss of dataplane connectivity" )
2085 utilities.assert_equals(
2086 expect=main.FALSE,
2087 actual=LossInPings,
2088 onpass="No Loss of connectivity",
2089 onfail="Loss of dataplane connectivity detected" )
2090 '''
2091
2092 main.step( "Leadership Election is still functional" )
2093 # Test of LeadershipElection
Jon Halla440e872016-03-31 15:15:50 -07002094 leaderList = []
2095
Jon Hall5cf14d52015-07-16 12:15:19 -07002096 # NOTE: this only works for the sanity test. In case of failures,
2097 # leader will likely change
Jon Halla440e872016-03-31 15:15:50 -07002098 leader = main.nodes[ main.activeNodes[ 0 ] ].ip_address
Jon Hall5cf14d52015-07-16 12:15:19 -07002099 leaderResult = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07002100
2101 for i in main.activeNodes:
2102 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002103 leaderN = cli.electionTestLeader()
Jon Halla440e872016-03-31 15:15:50 -07002104 leaderList.append( leaderN )
Jon Hall5cf14d52015-07-16 12:15:19 -07002105 # verify leader is ONOS1
2106 if leaderN == leader:
2107 # all is well
2108 # NOTE: In failure scenario, this could be a new node, maybe
2109 # check != ONOS1
2110 pass
2111 elif leaderN == main.FALSE:
2112 # error in response
2113 main.log.error( "Something is wrong with " +
2114 "electionTestLeader function, check the" +
2115 " error logs" )
2116 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002117 elif leaderN is None:
2118 main.log.error( cli.name +
2119 " shows no leader for the election-app was" +
2120 " elected after the old one died" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002121 leaderResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002122 if len( set( leaderList ) ) != 1:
2123 leaderResult = main.FALSE
2124 main.log.error(
2125 "Inconsistent view of leader for the election test app" )
2126 # TODO: print the list
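            # NOTE: added sketch, not in the original test, for the TODO above:
            #       log each node's view of the leader so the inconsistency is
            #       visible in the test log.
            main.log.debug( "Leaders seen per node: " + str( leaderList ) )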
Jon Hall5cf14d52015-07-16 12:15:19 -07002127 utilities.assert_equals(
2128 expect=main.TRUE,
2129 actual=leaderResult,
2130 onpass="Leadership election passed",
2131 onfail="Something went wrong with Leadership election" )
2132
2133 def CASE8( self, main ):
2134 """
2135 Compare topo
2136 """
2137 import json
2138 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002139 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002140 assert main, "main not defined"
2141 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002142 assert main.CLIs, "main.CLIs not defined"
2143 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002144
2145 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002146 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002147 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002148 topoResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002149 topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002150 elapsed = 0
2151 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002152 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002153 startTime = time.time()
2154 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002155 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002156 devicesResults = main.TRUE
2157 linksResults = main.TRUE
2158 hostsResults = main.TRUE
2159 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002160 count += 1
2161 cliStart = time.time()
2162 devices = []
2163 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002164 for i in main.activeNodes:
2165 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002166 name="devices-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002167 args=[ main.CLIs[i].devices, [ None ] ],
2168 kwargs= { 'sleep': 5, 'attempts': 5,
2169 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002170 threads.append( t )
2171 t.start()
2172
2173 for t in threads:
2174 t.join()
2175 devices.append( t.result )
2176 hosts = []
2177 ipResult = main.TRUE
2178 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002179 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002180 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002181 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002182 args=[ main.CLIs[i].hosts, [ None ] ],
2183 kwargs= { 'sleep': 5, 'attempts': 5,
2184 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002185 threads.append( t )
2186 t.start()
2187
2188 for t in threads:
2189 t.join()
2190 try:
2191 hosts.append( json.loads( t.result ) )
2192 except ( ValueError, TypeError ):
2193 main.log.exception( "Error parsing hosts results" )
2194 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002195 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002196 for controller in range( 0, len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002197 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002198 if hosts[ controller ]:
2199 for host in hosts[ controller ]:
2200 if host is None or host.get( 'ipAddresses', [] ) == []:
2201 main.log.error(
2202 "Error with host ipAddresses on controller" +
2203 controllerStr + ": " + str( host ) )
2204 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002205 ports = []
2206 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002207 for i in main.activeNodes:
2208 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002209 name="ports-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002210 args=[ main.CLIs[i].ports, [ None ] ],
2211 kwargs= { 'sleep': 5, 'attempts': 5,
2212 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002213 threads.append( t )
2214 t.start()
2215
2216 for t in threads:
2217 t.join()
2218 ports.append( t.result )
2219 links = []
2220 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002221 for i in main.activeNodes:
2222 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002223 name="links-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002224 args=[ main.CLIs[i].links, [ None ] ],
2225 kwargs= { 'sleep': 5, 'attempts': 5,
2226 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002227 threads.append( t )
2228 t.start()
2229
2230 for t in threads:
2231 t.join()
2232 links.append( t.result )
2233 clusters = []
2234 threads = []
Jon Halla440e872016-03-31 15:15:50 -07002235 for i in main.activeNodes:
2236 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002237 name="clusters-" + str( i ),
Jon Halla440e872016-03-31 15:15:50 -07002238 args=[ main.CLIs[i].clusters, [ None ] ],
2239 kwargs= { 'sleep': 5, 'attempts': 5,
2240 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002241 threads.append( t )
2242 t.start()
2243
2244 for t in threads:
2245 t.join()
2246 clusters.append( t.result )
2247
2248 elapsed = time.time() - startTime
2249 cliTime = time.time() - cliStart
2250 print "Elapsed time: " + str( elapsed )
2251 print "CLI time: " + str( cliTime )
2252
Jon Halla440e872016-03-31 15:15:50 -07002253 if all( e is None for e in devices ) and\
2254 all( e is None for e in hosts ) and\
2255 all( e is None for e in ports ) and\
2256 all( e is None for e in links ) and\
2257 all( e is None for e in clusters ):
2258 topoFailMsg = "Could not get topology from ONOS"
2259 main.log.error( topoFailMsg )
2260 continue # Try again, No use trying to compare
2261
Jon Hall5cf14d52015-07-16 12:15:19 -07002262 mnSwitches = main.Mininet1.getSwitches()
2263 mnLinks = main.Mininet1.getLinks()
2264 mnHosts = main.Mininet1.getHosts()
Jon Halla440e872016-03-31 15:15:50 -07002265 for controller in range( len( main.activeNodes ) ):
2266 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002267 if devices[ controller ] and ports[ controller ] and\
2268 "Error" not in devices[ controller ] and\
2269 "Error" not in ports[ controller ]:
2270
Jon Hallc6793552016-01-19 14:18:37 -08002271 try:
2272 currentDevicesResult = main.Mininet1.compareSwitches(
2273 mnSwitches,
2274 json.loads( devices[ controller ] ),
2275 json.loads( ports[ controller ] ) )
2276 except ( TypeError, ValueError ) as e:
2277 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2278 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002279 else:
2280 currentDevicesResult = main.FALSE
2281 utilities.assert_equals( expect=main.TRUE,
2282 actual=currentDevicesResult,
2283 onpass="ONOS" + controllerStr +
2284 " Switches view is correct",
2285 onfail="ONOS" + controllerStr +
2286 " Switches view is incorrect" )
2287
2288 if links[ controller ] and "Error" not in links[ controller ]:
2289 currentLinksResult = main.Mininet1.compareLinks(
2290 mnSwitches, mnLinks,
2291 json.loads( links[ controller ] ) )
2292 else:
2293 currentLinksResult = main.FALSE
2294 utilities.assert_equals( expect=main.TRUE,
2295 actual=currentLinksResult,
2296 onpass="ONOS" + controllerStr +
2297 " links view is correct",
2298 onfail="ONOS" + controllerStr +
2299 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002300 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002301 currentHostsResult = main.Mininet1.compareHosts(
2302 mnHosts,
2303 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002304 elif hosts[ controller ] == []:
2305 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002306 else:
2307 currentHostsResult = main.FALSE
2308 utilities.assert_equals( expect=main.TRUE,
2309 actual=currentHostsResult,
2310 onpass="ONOS" + controllerStr +
2311 " hosts exist in Mininet",
2312 onfail="ONOS" + controllerStr +
2313 " hosts don't match Mininet" )
2314 # CHECKING HOST ATTACHMENT POINTS
2315 hostAttachment = True
2316 zeroHosts = False
2317 # FIXME: topo-HA/obelisk specific mappings:
2318 # key is mac and value is dpid
2319 mappings = {}
2320 for i in range( 1, 29 ): # hosts 1 through 28
2321 # set up correct variables:
2322 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2323 if i == 1:
2324 deviceId = "1000".zfill(16)
2325 elif i == 2:
2326 deviceId = "2000".zfill(16)
2327 elif i == 3:
2328 deviceId = "3000".zfill(16)
2329 elif i == 4:
2330 deviceId = "3004".zfill(16)
2331 elif i == 5:
2332 deviceId = "5000".zfill(16)
2333 elif i == 6:
2334 deviceId = "6000".zfill(16)
2335 elif i == 7:
2336 deviceId = "6007".zfill(16)
2337 elif i >= 8 and i <= 17:
2338 dpid = '3' + str( i ).zfill( 3 )
2339 deviceId = dpid.zfill(16)
2340 elif i >= 18 and i <= 27:
2341 dpid = '6' + str( i ).zfill( 3 )
2342 deviceId = dpid.zfill(16)
2343 elif i == 28:
2344 deviceId = "2800".zfill(16)
2345 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002346 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002347 if hosts[ controller ] == []:
2348 main.log.warn( "There are no hosts discovered" )
2349 zeroHosts = True
2350 else:
2351 for host in hosts[ controller ]:
2352 mac = None
2353 location = None
2354 device = None
2355 port = None
2356 try:
2357 mac = host.get( 'mac' )
2358 assert mac, "mac field could not be found for this host object"
2359
2360 location = host.get( 'location' )
2361 assert location, "location field could not be found for this host object"
2362
2363 # Trim the protocol identifier off deviceId
2364 device = str( location.get( 'elementId' ) ).split(':')[1]
2365 assert device, "elementId field could not be found for this host location object"
2366
2367 port = location.get( 'port' )
2368 assert port, "port field could not be found for this host location object"
2369
2370 # Now check if this matches where they should be
2371 if mac and device and port:
2372 if str( port ) != "1":
2373 main.log.error( "The attachment port is incorrect for " +
2374 "host " + str( mac ) +
2375 ". Expected: 1 Actual: " + str( port) )
2376 hostAttachment = False
2377 if device != mappings[ str( mac ) ]:
2378 main.log.error( "The attachment device is incorrect for " +
2379 "host " + str( mac ) +
2380 ". Expected: " + mappings[ str( mac ) ] +
2381 " Actual: " + device )
2382 hostAttachment = False
2383 else:
2384 hostAttachment = False
2385 except AssertionError:
2386 main.log.exception( "Json object not as expected" )
2387 main.log.error( repr( host ) )
2388 hostAttachment = False
2389 else:
2390 main.log.error( "No hosts json output or \"Error\"" +
2391 " in output. hosts = " +
2392 repr( hosts[ controller ] ) )
2393 if zeroHosts is False:
2394 hostAttachment = True
2395
2396 # END CHECKING HOST ATTACHMENT POINTS
2397 devicesResults = devicesResults and currentDevicesResult
2398 linksResults = linksResults and currentLinksResult
2399 hostsResults = hostsResults and currentHostsResult
2400 hostAttachmentResults = hostAttachmentResults and\
2401 hostAttachment
2402 topoResult = ( devicesResults and linksResults
2403 and hostsResults and ipResult and
2404 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002405 utilities.assert_equals( expect=True,
2406 actual=topoResult,
2407 onpass="ONOS topology matches Mininet",
Jon Halla440e872016-03-31 15:15:50 -07002408 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002409 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002410
2411 # Compare json objects for hosts and dataplane clusters
2412
2413 # hosts
2414 main.step( "Hosts view is consistent across all ONOS nodes" )
2415 consistentHostsResult = main.TRUE
2416 for controller in range( len( hosts ) ):
Jon Halla440e872016-03-31 15:15:50 -07002417 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002418 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002419 if hosts[ controller ] == hosts[ 0 ]:
2420 continue
2421 else: # hosts not consistent
2422 main.log.error( "hosts from ONOS" + controllerStr +
2423 " is inconsistent with ONOS1" )
2424 main.log.warn( repr( hosts[ controller ] ) )
2425 consistentHostsResult = main.FALSE
2426
2427 else:
2428 main.log.error( "Error in getting ONOS hosts from ONOS" +
2429 controllerStr )
2430 consistentHostsResult = main.FALSE
2431 main.log.warn( "ONOS" + controllerStr +
2432 " hosts response: " +
2433 repr( hosts[ controller ] ) )
2434 utilities.assert_equals(
2435 expect=main.TRUE,
2436 actual=consistentHostsResult,
2437 onpass="Hosts view is consistent across all ONOS nodes",
2438 onfail="ONOS nodes have different views of hosts" )
2439
2440 main.step( "Hosts information is correct" )
2441 hostsResults = hostsResults and ipResult
2442 utilities.assert_equals(
2443 expect=main.TRUE,
2444 actual=hostsResults,
2445 onpass="Host information is correct",
2446 onfail="Host information is incorrect" )
2447
2448 main.step( "Host attachment points to the network" )
2449 utilities.assert_equals(
2450 expect=True,
2451 actual=hostAttachmentResults,
2452 onpass="Hosts are correctly attached to the network",
2453 onfail="ONOS did not correctly attach hosts to the network" )
2454
2455 # Strongly connected clusters of devices
2456 main.step( "Clusters view is consistent across all ONOS nodes" )
2457 consistentClustersResult = main.TRUE
2458 for controller in range( len( clusters ) ):
Jon Halla440e872016-03-31 15:15:50 -07002459 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002460 if "Error" not in clusters[ controller ]:
2461 if clusters[ controller ] == clusters[ 0 ]:
2462 continue
2463 else: # clusters not consistent
2464 main.log.error( "clusters from ONOS" +
2465 controllerStr +
2466 " is inconsistent with ONOS1" )
2467 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002468 else:
2469 main.log.error( "Error in getting dataplane clusters " +
2470 "from ONOS" + controllerStr )
2471 consistentClustersResult = main.FALSE
2472 main.log.warn( "ONOS" + controllerStr +
2473 " clusters response: " +
2474 repr( clusters[ controller ] ) )
2475 utilities.assert_equals(
2476 expect=main.TRUE,
2477 actual=consistentClustersResult,
2478 onpass="Clusters view is consistent across all ONOS nodes",
2479 onfail="ONOS nodes have different views of clusters" )
2480
2481 main.step( "There is only one SCC" )
2482 # there should always only be one cluster
2483 try:
2484 numClusters = len( json.loads( clusters[ 0 ] ) )
2485 except ( ValueError, TypeError ):
2486 main.log.exception( "Error parsing clusters[0]: " +
2487 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002488 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002489 clusterResults = main.FALSE
2490 if numClusters == 1:
2491 clusterResults = main.TRUE
2492 utilities.assert_equals(
2493 expect=1,
2494 actual=numClusters,
2495 onpass="ONOS shows 1 SCC",
2496 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2497
2498 topoResult = ( devicesResults and linksResults
2499 and hostsResults and consistentHostsResult
2500 and consistentClustersResult and clusterResults
2501 and ipResult and hostAttachmentResults )
2502
2503 topoResult = topoResult and int( count <= 2 )
2504 note = "note it takes about " + str( int( cliTime ) ) + \
2505 " seconds for the test to make all the cli calls to fetch " +\
2506 "the topology from each ONOS instance"
2507 main.log.info(
2508 "Very crass estimate for topology discovery/convergence( " +
2509 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2510 str( count ) + " tries" )
2511
2512 main.step( "Device information is correct" )
2513 utilities.assert_equals(
2514 expect=main.TRUE,
2515 actual=devicesResults,
2516 onpass="Device information is correct",
2517 onfail="Device information is incorrect" )
2518
2519 main.step( "Links are correct" )
2520 utilities.assert_equals(
2521 expect=main.TRUE,
2522 actual=linksResults,
2523 onpass="Link are correct",
2524 onfail="Links are incorrect" )
2525
2526 main.step( "Hosts are correct" )
2527 utilities.assert_equals(
2528 expect=main.TRUE,
2529 actual=hostsResults,
2530 onpass="Hosts are correct",
2531 onfail="Hosts are incorrect" )
2532
2533 # FIXME: move this to an ONOS state case
2534 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002535 nodeResults = utilities.retry( main.HA.nodesCheck,
2536 False,
2537 args=[main.activeNodes],
2538 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002539
Jon Hall41d39f12016-04-11 22:54:35 -07002540 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002541 onpass="Nodes check successful",
2542 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002543 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002544 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002545 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002546 main.CLIs[i].name,
2547 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002548
2549 def CASE9( self, main ):
2550 """
2551 Link s3-s28 down
2552 """
2553 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002554 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002555 assert main, "main not defined"
2556 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002557 assert main.CLIs, "main.CLIs not defined"
2558 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002559 # NOTE: You should probably run a topology check after this
2560
2561 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2562
2563 description = "Turn off a link to ensure that Link Discovery " +\
2564 "is working properly"
2565 main.case( description )
2566
2567 main.step( "Kill Link between s3 and s28" )
2568 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2569 main.log.info( "Waiting " + str( linkSleep ) +
2570 " seconds for link down to be discovered" )
2571 time.sleep( linkSleep )
2572 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2573 onpass="Link down successful",
2574 onfail="Failed to bring link down" )
2575 # TODO do some sort of check here
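        # NOTE: a minimal sketch, not part of the original test, of the check the
        #       TODO above suggests: ask one ONOS node for its link list and log
        #       the count, which should have dropped by two unidirectional links.
        try:
            import json
            onosLinks = json.loads( main.CLIs[ main.activeNodes[ 0 ] ].links() )
            main.log.debug( "Links seen by ONOS after the link down: " +
                            str( len( onosLinks ) ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Could not parse links output for the sketch" )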
2576
2577 def CASE10( self, main ):
2578 """
2579 Link s3-s28 up
2580 """
2581 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002582 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002583 assert main, "main not defined"
2584 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002585 assert main.CLIs, "main.CLIs not defined"
2586 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002587 # NOTE: You should probably run a topology check after this
2588
2589 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2590
2591 description = "Restore a link to ensure that Link Discovery is " + \
2592 "working properly"
2593 main.case( description )
2594
2595 main.step( "Bring link between s3 and s28 back up" )
2596 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2597 main.log.info( "Waiting " + str( linkSleep ) +
2598 " seconds for link up to be discovered" )
2599 time.sleep( linkSleep )
2600 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2601 onpass="Link up successful",
2602 onfail="Failed to bring link up" )
2603 # TODO do some sort of check here
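        # NOTE: illustrative sketch only, mirroring the link-down case above: log
        #       how many links one ONOS node reports once the link is restored.
        try:
            import json
            onosLinks = json.loads( main.CLIs[ main.activeNodes[ 0 ] ].links() )
            main.log.debug( "Links seen by ONOS after the link up: " +
                            str( len( onosLinks ) ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Could not parse links output for the sketch" )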
2604
2605 def CASE11( self, main ):
2606 """
2607 Switch Down
2608 """
2609 # NOTE: You should probably run a topology check after this
2610 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002611 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002612 assert main, "main not defined"
2613 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002614 assert main.CLIs, "main.CLIs not defined"
2615 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002616
2617 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2618
2619 description = "Killing a switch to ensure it is discovered correctly"
Jon Halla440e872016-03-31 15:15:50 -07002620 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002621 main.case( description )
2622 switch = main.params[ 'kill' ][ 'switch' ]
2623 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2624
2625 # TODO: Make this switch parameterizable
2626 main.step( "Kill " + switch )
2627 main.log.info( "Deleting " + switch )
2628 main.Mininet1.delSwitch( switch )
2629 main.log.info( "Waiting " + str( switchSleep ) +
2630 " seconds for switch down to be discovered" )
2631 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002632 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002633 # Peek at the deleted switch
2634 main.log.warn( str( device ) )
2635 result = main.FALSE
2636 if device and device[ 'available' ] is False:
2637 result = main.TRUE
2638 utilities.assert_equals( expect=main.TRUE, actual=result,
2639 onpass="Kill switch successful",
2640 onfail="Failed to kill switch?" )
2641
2642 def CASE12( self, main ):
2643 """
2644 Switch Up
2645 """
2646 # NOTE: You should probably run a topology check after this
2647 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002648 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002649 assert main, "main not defined"
2650 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002651 assert main.CLIs, "main.CLIs not defined"
2652 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002653 assert ONOS1Port, "ONOS1Port not defined"
2654 assert ONOS2Port, "ONOS2Port not defined"
2655 assert ONOS3Port, "ONOS3Port not defined"
2656 assert ONOS4Port, "ONOS4Port not defined"
2657 assert ONOS5Port, "ONOS5Port not defined"
2658 assert ONOS6Port, "ONOS6Port not defined"
2659 assert ONOS7Port, "ONOS7Port not defined"
2660
2661 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2662 switch = main.params[ 'kill' ][ 'switch' ]
2663 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2664 links = main.params[ 'kill' ][ 'links' ].split()
Jon Halla440e872016-03-31 15:15:50 -07002665 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002666 description = "Adding a switch to ensure it is discovered correctly"
2667 main.case( description )
2668
2669 main.step( "Add back " + switch )
2670 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2671 for peer in links:
2672 main.Mininet1.addLink( switch, peer )
Jon Halla440e872016-03-31 15:15:50 -07002673 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002674 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2675 main.log.info( "Waiting " + str( switchSleep ) +
2676 " seconds for switch up to be discovered" )
2677 time.sleep( switchSleep )
Jon Halla440e872016-03-31 15:15:50 -07002678 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002679 # Peek at the deleted switch
2680 main.log.warn( str( device ) )
2681 result = main.FALSE
2682 if device and device[ 'available' ]:
2683 result = main.TRUE
2684 utilities.assert_equals( expect=main.TRUE, actual=result,
2685 onpass="add switch successful",
2686 onfail="Failed to add switch?" )
2687
2688 def CASE13( self, main ):
2689 """
2690 Clean up
2691 """
2692 import os
2693 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002694 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002695 assert main, "main not defined"
2696 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002697 assert main.CLIs, "main.CLIs not defined"
2698 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002699
2700 # printing colors to terminal
2701 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2702 'blue': '\033[94m', 'green': '\033[92m',
2703 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2704 main.case( "Test Cleanup" )
2705 main.step( "Killing tcpdumps" )
2706 main.Mininet2.stopTcpdump()
2707
2708 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002709 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002710 main.step( "Copying MN pcap and ONOS log files to test station" )
2711 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2712 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002713 # NOTE: MN Pcap file is being saved to logdir.
2714 # We scp this file as MN and TestON aren't necessarily the same vm
2715
2716 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002717 # TODO: Load these from params
2718 # NOTE: must end in /
2719 logFolder = "/opt/onos/log/"
2720 logFiles = [ "karaf.log", "karaf.log.1" ]
2721 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002722 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002723 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002724 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002725 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2726 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002727 # std*.log's
2728 # NOTE: must end in /
2729 logFolder = "/opt/onos/var/"
2730 logFiles = [ "stderr.log", "stdout.log" ]
2731 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002732 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002733 for node in main.nodes:
Jon Halla440e872016-03-31 15:15:50 -07002734 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002735 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2736 logFolder + f, dstName )
2737 else:
2738 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002739
2740 main.step( "Stopping Mininet" )
2741 mnResult = main.Mininet1.stopNet()
2742 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2743 onpass="Mininet stopped",
2744 onfail="MN cleanup NOT successful" )
2745
2746 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002747 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002748 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2749 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002750
2751 try:
2752             timerLog = open( main.logdir + "/Timers.csv", 'w' )
2753             # Overwrite the file with the timer labels and values, then close it
2754 labels = "Gossip Intents"
2755 data = str( gossipTime )
2756 timerLog.write( labels + "\n" + data )
2757 timerLog.close()
2758 except NameError, e:
2759             main.log.exception( e )
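        # NOTE: gossipTime is only defined if the intent-gossip timing was measured
        #       in an earlier case of this test; the NameError handler above simply
        #       skips writing Timers.csv when that measurement is missing.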
2760
2761 def CASE14( self, main ):
2762 """
2763 start election app on all onos nodes
2764 """
Jon Halle1a3b752015-07-22 13:02:46 -07002765 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002766 assert main, "main not defined"
2767 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002768 assert main.CLIs, "main.CLIs not defined"
2769 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002770
2771         main.case( "Start Leadership Election app" )
2772 main.step( "Install leadership election app" )
Jon Halla440e872016-03-31 15:15:50 -07002773 onosCli = main.CLIs[ main.activeNodes[0] ]
2774 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002775 utilities.assert_equals(
2776 expect=main.TRUE,
2777 actual=appResult,
2778 onpass="Election app installed",
2779 onfail="Something went wrong with installing Leadership election" )
2780
2781 main.step( "Run for election on each node" )
Jon Halla440e872016-03-31 15:15:50 -07002782 for i in main.activeNodes:
2783 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002784 time.sleep(5)
2785 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2786 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002787 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002788 expect=True,
2789 actual=sameResult,
2790 onpass="All nodes see the same leaderboards",
2791 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002792
Jon Hall25463a82016-04-13 14:03:52 -07002793 if sameResult:
2794 leader = leaders[ 0 ][ 0 ]
2795 if main.nodes[main.activeNodes[0]].ip_address in leader:
2796 correctLeader = True
2797 else:
2798 correctLeader = False
2799 main.step( "First node was elected leader" )
2800 utilities.assert_equals(
2801 expect=True,
2802 actual=correctLeader,
2803 onpass="Correct leader was elected",
2804 onfail="Incorrect leader" )
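        # NOTE: main.HA.consistentLeaderboards() is assumed to return a tuple of
        #       ( allBoardsMatch, boards ), where each board is a list of node IPs
        #       ordered leader-first; that is why leaders[ 0 ][ 0 ] is treated as
        #       the current leader above. A minimal sketch of such a check, using a
        #       hypothetical cli.leaderCandidates() helper, could look like:
        #
        #           def consistentLeaderboards( clis ):
        #               boards = [ cli.leaderCandidates() for cli in clis ]
        #               return all( b == boards[ 0 ] for b in boards ), boards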
Jon Hall5cf14d52015-07-16 12:15:19 -07002805
2806 def CASE15( self, main ):
2807 """
2808 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002809 15.1 Run election on each node
2810 15.2 Check that each node has the same leaders and candidates
2811 15.3 Find current leader and withdraw
2812 15.4 Check that a new node was elected leader
2813         15.5 Check that the new leader was a candidate of the old leader
2814 15.6 Run for election on old leader
2815 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2816 15.8 Make sure that the old leader was added to the candidate list
2817
2818         The 'old' and 'new' variable prefixes first refer to data from before vs. after
2819         the withdrawal, and later to data from before the withdrawal vs. after the re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002820 """
2821 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002822 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002823 assert main, "main not defined"
2824 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002825 assert main.CLIs, "main.CLIs not defined"
2826 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002827
Jon Halla440e872016-03-31 15:15:50 -07002828 description = "Check that Leadership Election is still functional"
Jon Hall5cf14d52015-07-16 12:15:19 -07002829 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002830         # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002831
Jon Halla440e872016-03-31 15:15:50 -07002832         oldLeaders = []  # list of lists of each node's candidates before
2833         newLeaders = []  # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002834         oldLeader = ''  # the old leader from oldLeaders, None if not same
2835         newLeader = ''  # the new leader from newLeaders, None if not same
2836 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2837 expectNoLeader = False # True when there is only one leader
2838 if main.numCtrls == 1:
2839 expectNoLeader = True
2840
2841 main.step( "Run for election on each node" )
2842 electionResult = main.TRUE
2843
Jon Halla440e872016-03-31 15:15:50 -07002844 for i in main.activeNodes: # run test election on each node
2845 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002846 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002847 utilities.assert_equals(
2848 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002849 actual=electionResult,
2850 onpass="All nodes successfully ran for leadership",
2851 onfail="At least one node failed to run for leadership" )
2852
acsmars3a72bde2015-09-02 14:16:22 -07002853 if electionResult == main.FALSE:
2854 main.log.error(
Jon Halla440e872016-03-31 15:15:50 -07002855 "Skipping Test Case because Election Test App isn't loaded" )
acsmars3a72bde2015-09-02 14:16:22 -07002856 main.skipCase()
2857
acsmars71adceb2015-08-31 15:09:26 -07002858 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002859 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07002860 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07002861 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07002862 if sameResult:
2863 oldLeader = oldLeaders[ 0 ][ 0 ]
2864 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002865 else:
Jon Halla440e872016-03-31 15:15:50 -07002866 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002867 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002868 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002869 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002870 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002871 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002872
2873 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002874 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002875 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002876 if oldLeader is None:
2877 main.log.error( "Leadership isn't consistent." )
2878 withdrawResult = main.FALSE
2879 # Get the CLI of the oldLeader
Jon Halla440e872016-03-31 15:15:50 -07002880 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002881 if oldLeader == main.nodes[ i ].ip_address:
2882 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002883 break
2884 else: # FOR/ELSE statement
2885 main.log.error( "Leader election, could not find current leader" )
2886 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002887 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002888 utilities.assert_equals(
2889 expect=main.TRUE,
2890 actual=withdrawResult,
2891 onpass="Node was withdrawn from election",
2892 onfail="Node was not withdrawn from election" )
2893
acsmars71adceb2015-08-31 15:09:26 -07002894 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07002895 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07002896 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07002897 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07002898 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07002899 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07002900 if newLeaders[ 0 ][ 0 ] == 'none':
2901 main.log.error( "No leader was elected on at least 1 node" )
2902 if not expectNoLeader:
2903 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07002904 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07002905
2906 # Check that the new leader is not the older leader, which was withdrawn
2907 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07002908 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08002909 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07002910 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002911 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002912 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002913 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002914 onpass="Leadership election passed",
2915 onfail="Something went wrong with Leadership election" )
2916
Jon Halla440e872016-03-31 15:15:50 -07002917         main.step( "Check that the new leader was a candidate of the old leader" )
2918         # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07002919 correctCandidateResult = main.TRUE
2920 if expectNoLeader:
2921 if newLeader == 'none':
2922 main.log.info( "No leader expected. None found. Pass" )
2923 correctCandidateResult = main.TRUE
2924 else:
2925 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2926 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002927 elif len( oldLeaders[0] ) >= 3:
2928 if newLeader == oldLeaders[ 0 ][ 2 ]:
2929 # correct leader was elected
2930 correctCandidateResult = main.TRUE
2931 else:
2932 correctCandidateResult = main.FALSE
2933 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
2934 newLeader, oldLeaders[ 0 ][ 2 ] ) )
2935 else:
2936 main.log.warn( "Could not determine who should be the correct leader" )
2937 main.log.debug( oldLeaders[ 0 ] )
acsmars71adceb2015-08-31 15:09:26 -07002938 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07002939 utilities.assert_equals(
2940 expect=main.TRUE,
2941 actual=correctCandidateResult,
2942 onpass="Correct Candidate Elected",
2943 onfail="Incorrect Candidate Elected" )
2944
Jon Hall5cf14d52015-07-16 12:15:19 -07002945         main.step( "Run for election on the old leader ( just so everyone " +
2946 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002947 if oldLeaderCLI is not None:
2948 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002949 else:
acsmars71adceb2015-08-31 15:09:26 -07002950 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002951 runResult = main.FALSE
2952 utilities.assert_equals(
2953 expect=main.TRUE,
2954 actual=runResult,
2955 onpass="App re-ran for election",
2956 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07002957
acsmars71adceb2015-08-31 15:09:26 -07002958 main.step(
2959 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002960 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07002961 # Get new leaders and candidates
2962 reRunLeaders = []
2963         time.sleep( 5 )  # Parameterize
Jon Hall41d39f12016-04-11 22:54:35 -07002964 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07002965
2966 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07002967         if not reRunLeaders[ 0 ]:
2968             positionResult = main.FALSE
2969         elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07002970             main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
2971 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07002972 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002973 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002974 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002975 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002976 onpass="Old leader successfully re-ran for election",
2977 onfail="Something went wrong with Leadership election after " +
2978 "the old leader re-ran for election" )
2979
2980 def CASE16( self, main ):
2981 """
2982 Install Distributed Primitives app
2983 """
2984 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002985 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002986 assert main, "main not defined"
2987 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002988 assert main.CLIs, "main.CLIs not defined"
2989 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002990
2991 # Variables for the distributed primitives tests
2992 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07002993 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07002994 global onosSet
2995 global onosSetName
2996 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07002997 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07002998 onosSet = set([])
2999 onosSetName = "TestON-set"
3000
3001 description = "Install Primitives app"
3002 main.case( description )
3003 main.step( "Install Primitives app" )
3004 appName = "org.onosproject.distributedprimitives"
Jon Halla440e872016-03-31 15:15:50 -07003005 node = main.activeNodes[0]
3006 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003007 utilities.assert_equals( expect=main.TRUE,
3008 actual=appResults,
3009 onpass="Primitives app activated",
3010 onfail="Primitives app not activated" )
3011 time.sleep( 5 ) # To allow all nodes to activate
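        # NOTE: the fixed sleep is a heuristic wait for the app activation to
        #       propagate to every node; polling each active node's app state until
        #       it reports the app as active (with a timeout) would make this step
        #       deterministic, at the cost of a little more CLI traffic.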
3012
3013 def CASE17( self, main ):
3014 """
3015 Check for basic functionality with distributed primitives
3016 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003017 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003018 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003019 assert main, "main not defined"
3020 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003021 assert main.CLIs, "main.CLIs not defined"
3022 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003023 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003024 assert onosSetName, "onosSetName not defined"
3025 # NOTE: assert fails if value is 0/None/Empty/False
3026 try:
3027 pCounterValue
3028 except NameError:
3029 main.log.error( "pCounterValue not defined, setting to 0" )
3030 pCounterValue = 0
3031 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003032 onosSet
3033 except NameError:
3034 main.log.error( "onosSet not defined, setting to empty Set" )
3035 onosSet = set([])
3036 # Variables for the distributed primitives tests. These are local only
3037 addValue = "a"
3038 addAllValue = "a b c d e f"
3039 retainValue = "c d e f"
3040
3041 description = "Check for basic functionality with distributed " +\
3042 "primitives"
3043 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003044 main.caseExplanation = "Test the methods of the distributed " +\
3045 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003046 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003047 # Partitioned counters
3048 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003049 pCounters = []
3050 threads = []
3051 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003052 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003053 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3054 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003055 args=[ pCounterName ] )
3056 pCounterValue += 1
3057 addedPValues.append( pCounterValue )
3058 threads.append( t )
3059 t.start()
3060
3061 for t in threads:
3062 t.join()
3063 pCounters.append( t.result )
3064 # Check that counter incremented numController times
3065 pCounterResults = True
3066 for i in addedPValues:
3067 tmpResult = i in pCounters
3068 pCounterResults = pCounterResults and tmpResult
3069 if not tmpResult:
3070 main.log.error( str( i ) + " is not in partitioned "
3071 "counter incremented results" )
3072 utilities.assert_equals( expect=True,
3073 actual=pCounterResults,
3074 onpass="Default counter incremented",
3075 onfail="Error incrementing default" +
3076 " counter" )
3077
Jon Halle1a3b752015-07-22 13:02:46 -07003078 main.step( "Get then Increment a default counter on each node" )
3079 pCounters = []
3080 threads = []
3081 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003082 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003083 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3084 name="counterGetAndAdd-" + str( i ),
3085 args=[ pCounterName ] )
3086 addedPValues.append( pCounterValue )
3087 pCounterValue += 1
3088 threads.append( t )
3089 t.start()
3090
3091 for t in threads:
3092 t.join()
3093 pCounters.append( t.result )
3094 # Check that counter incremented numController times
3095 pCounterResults = True
3096 for i in addedPValues:
3097 tmpResult = i in pCounters
3098 pCounterResults = pCounterResults and tmpResult
3099 if not tmpResult:
3100 main.log.error( str( i ) + " is not in partitioned "
3101 "counter incremented results" )
3102 utilities.assert_equals( expect=True,
3103 actual=pCounterResults,
3104 onpass="Default counter incremented",
3105 onfail="Error incrementing default" +
3106 " counter" )
3107
3108 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003109 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003110 utilities.assert_equals( expect=main.TRUE,
3111 actual=incrementCheck,
3112 onpass="Added counters are correct",
3113 onfail="Added counters are incorrect" )
3114
3115 main.step( "Add -8 to then get a default counter on each node" )
3116 pCounters = []
3117 threads = []
3118 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003119 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003120 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3121 name="counterIncrement-" + str( i ),
3122 args=[ pCounterName ],
3123 kwargs={ "delta": -8 } )
3124 pCounterValue += -8
3125 addedPValues.append( pCounterValue )
3126 threads.append( t )
3127 t.start()
3128
3129 for t in threads:
3130 t.join()
3131 pCounters.append( t.result )
3132 # Check that counter incremented numController times
3133 pCounterResults = True
3134 for i in addedPValues:
3135 tmpResult = i in pCounters
3136 pCounterResults = pCounterResults and tmpResult
3137 if not tmpResult:
3138 main.log.error( str( i ) + " is not in partitioned "
3139 "counter incremented results" )
3140 utilities.assert_equals( expect=True,
3141 actual=pCounterResults,
3142 onpass="Default counter incremented",
3143 onfail="Error incrementing default" +
3144 " counter" )
3145
3146 main.step( "Add 5 to then get a default counter on each node" )
3147 pCounters = []
3148 threads = []
3149 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003150 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003151 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3152 name="counterIncrement-" + str( i ),
3153 args=[ pCounterName ],
3154 kwargs={ "delta": 5 } )
3155 pCounterValue += 5
3156 addedPValues.append( pCounterValue )
3157 threads.append( t )
3158 t.start()
3159
3160 for t in threads:
3161 t.join()
3162 pCounters.append( t.result )
3163 # Check that counter incremented numController times
3164 pCounterResults = True
3165 for i in addedPValues:
3166 tmpResult = i in pCounters
3167 pCounterResults = pCounterResults and tmpResult
3168 if not tmpResult:
3169 main.log.error( str( i ) + " is not in partitioned "
3170 "counter incremented results" )
3171 utilities.assert_equals( expect=True,
3172 actual=pCounterResults,
3173 onpass="Default counter incremented",
3174 onfail="Error incrementing default" +
3175 " counter" )
3176
3177 main.step( "Get then add 5 to a default counter on each node" )
3178 pCounters = []
3179 threads = []
3180 addedPValues = []
Jon Halla440e872016-03-31 15:15:50 -07003181 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003182 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3183 name="counterIncrement-" + str( i ),
3184 args=[ pCounterName ],
3185 kwargs={ "delta": 5 } )
3186 addedPValues.append( pCounterValue )
3187 pCounterValue += 5
3188 threads.append( t )
3189 t.start()
3190
3191 for t in threads:
3192 t.join()
3193 pCounters.append( t.result )
3194 # Check that counter incremented numController times
3195 pCounterResults = True
3196 for i in addedPValues:
3197 tmpResult = i in pCounters
3198 pCounterResults = pCounterResults and tmpResult
3199 if not tmpResult:
3200 main.log.error( str( i ) + " is not in partitioned "
3201 "counter incremented results" )
3202 utilities.assert_equals( expect=True,
3203 actual=pCounterResults,
3204 onpass="Default counter incremented",
3205 onfail="Error incrementing default" +
3206 " counter" )
3207
3208 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003209 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003210 utilities.assert_equals( expect=main.TRUE,
3211 actual=incrementCheck,
3212 onpass="Added counters are correct",
3213 onfail="Added counters are incorrect" )
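        # NOTE: pCounterValue mirrors the expected server-side value: assuming the
        #       counter started at zero, every active node applies each delta once,
        #       so with N active nodes the counter should now hold
        #       N * ( 1 + 1 - 8 + 5 + 5 ) = 4 * N. For example, with 7 active nodes
        #       the check above expects the counter to read 28.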
3214
Jon Hall5cf14d52015-07-16 12:15:19 -07003215 # DISTRIBUTED SETS
3216 main.step( "Distributed Set get" )
3217 size = len( onosSet )
3218 getResponses = []
3219 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003220 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003221 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003222 name="setTestGet-" + str( i ),
3223 args=[ onosSetName ] )
3224 threads.append( t )
3225 t.start()
3226 for t in threads:
3227 t.join()
3228 getResponses.append( t.result )
3229
3230 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003231 for i in range( len( main.activeNodes ) ):
3232 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003233 if isinstance( getResponses[ i ], list):
3234 current = set( getResponses[ i ] )
3235 if len( current ) == len( getResponses[ i ] ):
3236 # no repeats
3237 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003238 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003239 " has incorrect view" +
3240 " of set " + onosSetName + ":\n" +
3241 str( getResponses[ i ] ) )
3242 main.log.debug( "Expected: " + str( onosSet ) )
3243 main.log.debug( "Actual: " + str( current ) )
3244 getResults = main.FALSE
3245 else:
3246 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003247 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003248 " has repeat elements in" +
3249 " set " + onosSetName + ":\n" +
3250 str( getResponses[ i ] ) )
3251 getResults = main.FALSE
3252 elif getResponses[ i ] == main.ERROR:
3253 getResults = main.FALSE
3254 utilities.assert_equals( expect=main.TRUE,
3255 actual=getResults,
3256 onpass="Set elements are correct",
3257 onfail="Set elements are incorrect" )
3258
3259 main.step( "Distributed Set size" )
3260 sizeResponses = []
3261 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003262 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003263 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003264 name="setTestSize-" + str( i ),
3265 args=[ onosSetName ] )
3266 threads.append( t )
3267 t.start()
3268 for t in threads:
3269 t.join()
3270 sizeResponses.append( t.result )
3271
3272 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003273 for i in range( len( main.activeNodes ) ):
3274 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003275 if size != sizeResponses[ i ]:
3276 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003277 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003278 " expected a size of " + str( size ) +
3279 " for set " + onosSetName +
3280 " but got " + str( sizeResponses[ i ] ) )
3281 utilities.assert_equals( expect=main.TRUE,
3282 actual=sizeResults,
3283 onpass="Set sizes are correct",
3284 onfail="Set sizes are incorrect" )
3285
3286 main.step( "Distributed Set add()" )
3287 onosSet.add( addValue )
3288 addResponses = []
3289 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003290 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003291 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003292 name="setTestAdd-" + str( i ),
3293 args=[ onosSetName, addValue ] )
3294 threads.append( t )
3295 t.start()
3296 for t in threads:
3297 t.join()
3298 addResponses.append( t.result )
3299
3300 # main.TRUE = successfully changed the set
3301 # main.FALSE = action resulted in no change in set
3302 # main.ERROR - Some error in executing the function
3303 addResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003304 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003305 if addResponses[ i ] == main.TRUE:
3306 # All is well
3307 pass
3308 elif addResponses[ i ] == main.FALSE:
3309 # Already in set, probably fine
3310 pass
3311 elif addResponses[ i ] == main.ERROR:
3312 # Error in execution
3313 addResults = main.FALSE
3314 else:
3315 # unexpected result
3316 addResults = main.FALSE
3317 if addResults != main.TRUE:
3318 main.log.error( "Error executing set add" )
3319
3320 # Check if set is still correct
3321 size = len( onosSet )
3322 getResponses = []
3323 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003324 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003325 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003326 name="setTestGet-" + str( i ),
3327 args=[ onosSetName ] )
3328 threads.append( t )
3329 t.start()
3330 for t in threads:
3331 t.join()
3332 getResponses.append( t.result )
3333 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003334 for i in range( len( main.activeNodes ) ):
3335 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003336 if isinstance( getResponses[ i ], list):
3337 current = set( getResponses[ i ] )
3338 if len( current ) == len( getResponses[ i ] ):
3339 # no repeats
3340 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003341 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003342 " of set " + onosSetName + ":\n" +
3343 str( getResponses[ i ] ) )
3344 main.log.debug( "Expected: " + str( onosSet ) )
3345 main.log.debug( "Actual: " + str( current ) )
3346 getResults = main.FALSE
3347 else:
3348 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003349 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003350 " set " + onosSetName + ":\n" +
3351 str( getResponses[ i ] ) )
3352 getResults = main.FALSE
3353 elif getResponses[ i ] == main.ERROR:
3354 getResults = main.FALSE
3355 sizeResponses = []
3356 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003357 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003358 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003359 name="setTestSize-" + str( i ),
3360 args=[ onosSetName ] )
3361 threads.append( t )
3362 t.start()
3363 for t in threads:
3364 t.join()
3365 sizeResponses.append( t.result )
3366 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003367 for i in range( len( main.activeNodes ) ):
3368 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003369 if size != sizeResponses[ i ]:
3370 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003371 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003372 " expected a size of " + str( size ) +
3373 " for set " + onosSetName +
3374 " but got " + str( sizeResponses[ i ] ) )
3375 addResults = addResults and getResults and sizeResults
3376 utilities.assert_equals( expect=main.TRUE,
3377 actual=addResults,
3378 onpass="Set add correct",
3379 onfail="Set add was incorrect" )
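        # NOTE: the get/size verification above is repeated verbatim after every set
        #       mutation in this case; a small helper along these lines (a sketch
        #       only, with threading and error handling omitted) could collapse each
        #       of those blocks into one call:
        #
        #           def checkSetState( onosSet, onosSetName, activeNodes, CLIs ):
        #               getsOk = all( set( CLIs[ i ].setTestGet( onosSetName ) ) == onosSet
        #                             for i in activeNodes )
        #               sizesOk = all( CLIs[ i ].setTestSize( onosSetName ) == len( onosSet )
        #                              for i in activeNodes )
        #               return getsOk and sizesOk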
3380
3381 main.step( "Distributed Set addAll()" )
3382 onosSet.update( addAllValue.split() )
3383 addResponses = []
3384 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003385 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003386 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003387 name="setTestAddAll-" + str( i ),
3388 args=[ onosSetName, addAllValue ] )
3389 threads.append( t )
3390 t.start()
3391 for t in threads:
3392 t.join()
3393 addResponses.append( t.result )
3394
3395 # main.TRUE = successfully changed the set
3396 # main.FALSE = action resulted in no change in set
3397 # main.ERROR - Some error in executing the function
3398 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003399 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003400 if addResponses[ i ] == main.TRUE:
3401 # All is well
3402 pass
3403 elif addResponses[ i ] == main.FALSE:
3404 # Already in set, probably fine
3405 pass
3406 elif addResponses[ i ] == main.ERROR:
3407 # Error in execution
3408 addAllResults = main.FALSE
3409 else:
3410 # unexpected result
3411 addAllResults = main.FALSE
3412 if addAllResults != main.TRUE:
3413 main.log.error( "Error executing set addAll" )
3414
3415 # Check if set is still correct
3416 size = len( onosSet )
3417 getResponses = []
3418 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003419 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003420 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003421 name="setTestGet-" + str( i ),
3422 args=[ onosSetName ] )
3423 threads.append( t )
3424 t.start()
3425 for t in threads:
3426 t.join()
3427 getResponses.append( t.result )
3428 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003429 for i in range( len( main.activeNodes ) ):
3430 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003431 if isinstance( getResponses[ i ], list):
3432 current = set( getResponses[ i ] )
3433 if len( current ) == len( getResponses[ i ] ):
3434 # no repeats
3435 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003436 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003437 " has incorrect view" +
3438 " of set " + onosSetName + ":\n" +
3439 str( getResponses[ i ] ) )
3440 main.log.debug( "Expected: " + str( onosSet ) )
3441 main.log.debug( "Actual: " + str( current ) )
3442 getResults = main.FALSE
3443 else:
3444 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003445 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003446 " has repeat elements in" +
3447 " set " + onosSetName + ":\n" +
3448 str( getResponses[ i ] ) )
3449 getResults = main.FALSE
3450 elif getResponses[ i ] == main.ERROR:
3451 getResults = main.FALSE
3452 sizeResponses = []
3453 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003454 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003455 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003456 name="setTestSize-" + str( i ),
3457 args=[ onosSetName ] )
3458 threads.append( t )
3459 t.start()
3460 for t in threads:
3461 t.join()
3462 sizeResponses.append( t.result )
3463 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003464 for i in range( len( main.activeNodes ) ):
3465 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003466 if size != sizeResponses[ i ]:
3467 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003468 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003469 " expected a size of " + str( size ) +
3470 " for set " + onosSetName +
3471 " but got " + str( sizeResponses[ i ] ) )
3472 addAllResults = addAllResults and getResults and sizeResults
3473 utilities.assert_equals( expect=main.TRUE,
3474 actual=addAllResults,
3475 onpass="Set addAll correct",
3476 onfail="Set addAll was incorrect" )
3477
3478 main.step( "Distributed Set contains()" )
3479 containsResponses = []
3480 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003481 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003482 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003483 name="setContains-" + str( i ),
3484 args=[ onosSetName ],
3485 kwargs={ "values": addValue } )
3486 threads.append( t )
3487 t.start()
3488 for t in threads:
3489 t.join()
3490 # NOTE: This is the tuple
3491 containsResponses.append( t.result )
3492
3493 containsResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003494 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003495 if containsResponses[ i ] == main.ERROR:
3496 containsResults = main.FALSE
3497 else:
3498 containsResults = containsResults and\
3499 containsResponses[ i ][ 1 ]
3500 utilities.assert_equals( expect=main.TRUE,
3501 actual=containsResults,
3502 onpass="Set contains is functional",
3503 onfail="Set contains failed" )
3504
3505 main.step( "Distributed Set containsAll()" )
3506 containsAllResponses = []
3507 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003508 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003509 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003510 name="setContainsAll-" + str( i ),
3511 args=[ onosSetName ],
3512 kwargs={ "values": addAllValue } )
3513 threads.append( t )
3514 t.start()
3515 for t in threads:
3516 t.join()
3517 # NOTE: This is the tuple
3518 containsAllResponses.append( t.result )
3519
3520 containsAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003521 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003522             if containsAllResponses[ i ] == main.ERROR:
3523                 containsAllResults = main.FALSE
3524             else:
3525                 containsAllResults = containsAllResults and\
3526                                      containsAllResponses[ i ][ 1 ]
3527 utilities.assert_equals( expect=main.TRUE,
3528 actual=containsAllResults,
3529 onpass="Set containsAll is functional",
3530 onfail="Set containsAll failed" )
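        # NOTE: when setTestGet() is called with the "values" kwarg, its result is
        #       assumed to be a tuple whose second element is the boolean verdict,
        #       which is why the contains/containsAll loops above index [ 1 ].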
3531
3532 main.step( "Distributed Set remove()" )
3533 onosSet.remove( addValue )
3534 removeResponses = []
3535 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003536 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003537 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003538 name="setTestRemove-" + str( i ),
3539 args=[ onosSetName, addValue ] )
3540 threads.append( t )
3541 t.start()
3542 for t in threads:
3543 t.join()
3544 removeResponses.append( t.result )
3545
3546 # main.TRUE = successfully changed the set
3547 # main.FALSE = action resulted in no change in set
3548 # main.ERROR - Some error in executing the function
3549 removeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003550 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003551 if removeResponses[ i ] == main.TRUE:
3552 # All is well
3553 pass
3554 elif removeResponses[ i ] == main.FALSE:
3555 # not in set, probably fine
3556 pass
3557 elif removeResponses[ i ] == main.ERROR:
3558 # Error in execution
3559 removeResults = main.FALSE
3560 else:
3561 # unexpected result
3562 removeResults = main.FALSE
3563 if removeResults != main.TRUE:
3564 main.log.error( "Error executing set remove" )
3565
3566 # Check if set is still correct
3567 size = len( onosSet )
3568 getResponses = []
3569 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003570 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003571 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003572 name="setTestGet-" + str( i ),
3573 args=[ onosSetName ] )
3574 threads.append( t )
3575 t.start()
3576 for t in threads:
3577 t.join()
3578 getResponses.append( t.result )
3579 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003580 for i in range( len( main.activeNodes ) ):
3581 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003582 if isinstance( getResponses[ i ], list):
3583 current = set( getResponses[ i ] )
3584 if len( current ) == len( getResponses[ i ] ):
3585 # no repeats
3586 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003587 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003588 " has incorrect view" +
3589 " of set " + onosSetName + ":\n" +
3590 str( getResponses[ i ] ) )
3591 main.log.debug( "Expected: " + str( onosSet ) )
3592 main.log.debug( "Actual: " + str( current ) )
3593 getResults = main.FALSE
3594 else:
3595 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003596 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003597 " has repeat elements in" +
3598 " set " + onosSetName + ":\n" +
3599 str( getResponses[ i ] ) )
3600 getResults = main.FALSE
3601 elif getResponses[ i ] == main.ERROR:
3602 getResults = main.FALSE
3603 sizeResponses = []
3604 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003605 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003606 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003607 name="setTestSize-" + str( i ),
3608 args=[ onosSetName ] )
3609 threads.append( t )
3610 t.start()
3611 for t in threads:
3612 t.join()
3613 sizeResponses.append( t.result )
3614 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003615 for i in range( len( main.activeNodes ) ):
3616 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003617 if size != sizeResponses[ i ]:
3618 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003619 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003620 " expected a size of " + str( size ) +
3621 " for set " + onosSetName +
3622 " but got " + str( sizeResponses[ i ] ) )
3623 removeResults = removeResults and getResults and sizeResults
3624 utilities.assert_equals( expect=main.TRUE,
3625 actual=removeResults,
3626 onpass="Set remove correct",
3627 onfail="Set remove was incorrect" )
3628
3629 main.step( "Distributed Set removeAll()" )
3630 onosSet.difference_update( addAllValue.split() )
3631 removeAllResponses = []
3632 threads = []
3633 try:
Jon Halla440e872016-03-31 15:15:50 -07003634 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003635 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003636 name="setTestRemoveAll-" + str( i ),
3637 args=[ onosSetName, addAllValue ] )
3638 threads.append( t )
3639 t.start()
3640 for t in threads:
3641 t.join()
3642 removeAllResponses.append( t.result )
3643 except Exception, e:
3644 main.log.exception(e)
3645
3646 # main.TRUE = successfully changed the set
3647 # main.FALSE = action resulted in no change in set
3648 # main.ERROR - Some error in executing the function
3649 removeAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003650 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003651 if removeAllResponses[ i ] == main.TRUE:
3652 # All is well
3653 pass
3654 elif removeAllResponses[ i ] == main.FALSE:
3655 # not in set, probably fine
3656 pass
3657 elif removeAllResponses[ i ] == main.ERROR:
3658 # Error in execution
3659 removeAllResults = main.FALSE
3660 else:
3661 # unexpected result
3662 removeAllResults = main.FALSE
3663 if removeAllResults != main.TRUE:
3664 main.log.error( "Error executing set removeAll" )
3665
3666 # Check if set is still correct
3667 size = len( onosSet )
3668 getResponses = []
3669 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003670 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003671 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003672 name="setTestGet-" + str( i ),
3673 args=[ onosSetName ] )
3674 threads.append( t )
3675 t.start()
3676 for t in threads:
3677 t.join()
3678 getResponses.append( t.result )
3679 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003680 for i in range( len( main.activeNodes ) ):
3681 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003682 if isinstance( getResponses[ i ], list):
3683 current = set( getResponses[ i ] )
3684 if len( current ) == len( getResponses[ i ] ):
3685 # no repeats
3686 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003687 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003688 " has incorrect view" +
3689 " of set " + onosSetName + ":\n" +
3690 str( getResponses[ i ] ) )
3691 main.log.debug( "Expected: " + str( onosSet ) )
3692 main.log.debug( "Actual: " + str( current ) )
3693 getResults = main.FALSE
3694 else:
3695 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003696 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003697 " has repeat elements in" +
3698 " set " + onosSetName + ":\n" +
3699 str( getResponses[ i ] ) )
3700 getResults = main.FALSE
3701 elif getResponses[ i ] == main.ERROR:
3702 getResults = main.FALSE
3703 sizeResponses = []
3704 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003705 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003706 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003707 name="setTestSize-" + str( i ),
3708 args=[ onosSetName ] )
3709 threads.append( t )
3710 t.start()
3711 for t in threads:
3712 t.join()
3713 sizeResponses.append( t.result )
3714 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003715 for i in range( len( main.activeNodes ) ):
3716 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003717 if size != sizeResponses[ i ]:
3718 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003719 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003720 " expected a size of " + str( size ) +
3721 " for set " + onosSetName +
3722 " but got " + str( sizeResponses[ i ] ) )
3723 removeAllResults = removeAllResults and getResults and sizeResults
3724 utilities.assert_equals( expect=main.TRUE,
3725 actual=removeAllResults,
3726 onpass="Set removeAll correct",
3727 onfail="Set removeAll was incorrect" )
3728
3729 main.step( "Distributed Set addAll()" )
3730 onosSet.update( addAllValue.split() )
3731 addResponses = []
3732 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003733 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003734 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003735 name="setTestAddAll-" + str( i ),
3736 args=[ onosSetName, addAllValue ] )
3737 threads.append( t )
3738 t.start()
3739 for t in threads:
3740 t.join()
3741 addResponses.append( t.result )
3742
3743 # main.TRUE = successfully changed the set
3744 # main.FALSE = action resulted in no change in set
3745 # main.ERROR - Some error in executing the function
3746 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003747 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003748 if addResponses[ i ] == main.TRUE:
3749 # All is well
3750 pass
3751 elif addResponses[ i ] == main.FALSE:
3752 # Already in set, probably fine
3753 pass
3754 elif addResponses[ i ] == main.ERROR:
3755 # Error in execution
3756 addAllResults = main.FALSE
3757 else:
3758 # unexpected result
3759 addAllResults = main.FALSE
3760 if addAllResults != main.TRUE:
3761 main.log.error( "Error executing set addAll" )
3762
3763 # Check if set is still correct
3764 size = len( onosSet )
3765 getResponses = []
3766 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003767 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003768 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003769 name="setTestGet-" + str( i ),
3770 args=[ onosSetName ] )
3771 threads.append( t )
3772 t.start()
3773 for t in threads:
3774 t.join()
3775 getResponses.append( t.result )
3776 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003777 for i in range( len( main.activeNodes ) ):
3778 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003779 if isinstance( getResponses[ i ], list):
3780 current = set( getResponses[ i ] )
3781 if len( current ) == len( getResponses[ i ] ):
3782 # no repeats
3783 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003784 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003785 " has incorrect view" +
3786 " of set " + onosSetName + ":\n" +
3787 str( getResponses[ i ] ) )
3788 main.log.debug( "Expected: " + str( onosSet ) )
3789 main.log.debug( "Actual: " + str( current ) )
3790 getResults = main.FALSE
3791 else:
3792 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003793 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003794 " has repeat elements in" +
3795 " set " + onosSetName + ":\n" +
3796 str( getResponses[ i ] ) )
3797 getResults = main.FALSE
3798 elif getResponses[ i ] == main.ERROR:
3799 getResults = main.FALSE
3800 sizeResponses = []
3801 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003802 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003803 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003804 name="setTestSize-" + str( i ),
3805 args=[ onosSetName ] )
3806 threads.append( t )
3807 t.start()
3808 for t in threads:
3809 t.join()
3810 sizeResponses.append( t.result )
3811 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003812 for i in range( len( main.activeNodes ) ):
3813 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003814 if size != sizeResponses[ i ]:
3815 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003816 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003817 " expected a size of " + str( size ) +
3818 " for set " + onosSetName +
3819 " but got " + str( sizeResponses[ i ] ) )
3820 addAllResults = addAllResults and getResults and sizeResults
3821 utilities.assert_equals( expect=main.TRUE,
3822 actual=addAllResults,
3823 onpass="Set addAll correct",
3824 onfail="Set addAll was incorrect" )
3825
3826 main.step( "Distributed Set clear()" )
3827 onosSet.clear()
3828 clearResponses = []
3829 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003830 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003831 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003832 name="setTestClear-" + str( i ),
3833                              args=[ onosSetName, " " ],  # Value doesn't matter
3834 kwargs={ "clear": True } )
3835 threads.append( t )
3836 t.start()
3837 for t in threads:
3838 t.join()
3839 clearResponses.append( t.result )
3840
3841 # main.TRUE = successfully changed the set
3842 # main.FALSE = action resulted in no change in set
3843 # main.ERROR - Some error in executing the function
3844 clearResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003845 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003846 if clearResponses[ i ] == main.TRUE:
3847 # All is well
3848 pass
3849 elif clearResponses[ i ] == main.FALSE:
3850 # Nothing set, probably fine
3851 pass
3852 elif clearResponses[ i ] == main.ERROR:
3853 # Error in execution
3854 clearResults = main.FALSE
3855 else:
3856 # unexpected result
3857 clearResults = main.FALSE
3858 if clearResults != main.TRUE:
3859 main.log.error( "Error executing set clear" )
3860
3861 # Check if set is still correct
3862 size = len( onosSet )
3863 getResponses = []
3864 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003865 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003866 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003867 name="setTestGet-" + str( i ),
3868 args=[ onosSetName ] )
3869 threads.append( t )
3870 t.start()
3871 for t in threads:
3872 t.join()
3873 getResponses.append( t.result )
3874 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003875 for i in range( len( main.activeNodes ) ):
3876 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003877 if isinstance( getResponses[ i ], list):
3878 current = set( getResponses[ i ] )
3879 if len( current ) == len( getResponses[ i ] ):
3880 # no repeats
3881 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003882 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003883 " has incorrect view" +
3884 " of set " + onosSetName + ":\n" +
3885 str( getResponses[ i ] ) )
3886 main.log.debug( "Expected: " + str( onosSet ) )
3887 main.log.debug( "Actual: " + str( current ) )
3888 getResults = main.FALSE
3889 else:
3890 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003891 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003892 " has repeat elements in" +
3893 " set " + onosSetName + ":\n" +
3894 str( getResponses[ i ] ) )
3895 getResults = main.FALSE
3896 elif getResponses[ i ] == main.ERROR:
3897 getResults = main.FALSE
3898 sizeResponses = []
3899 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003900 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003901 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003902 name="setTestSize-" + str( i ),
3903 args=[ onosSetName ] )
3904 threads.append( t )
3905 t.start()
3906 for t in threads:
3907 t.join()
3908 sizeResponses.append( t.result )
3909 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003910 for i in range( len( main.activeNodes ) ):
3911 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003912 if size != sizeResponses[ i ]:
3913 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003914 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003915 " expected a size of " + str( size ) +
3916 " for set " + onosSetName +
3917 " but got " + str( sizeResponses[ i ] ) )
3918 clearResults = clearResults and getResults and sizeResults
3919 utilities.assert_equals( expect=main.TRUE,
3920 actual=clearResults,
3921 onpass="Set clear correct",
3922 onfail="Set clear was incorrect" )
3923
3924 main.step( "Distributed Set addAll()" )
3925 onosSet.update( addAllValue.split() )
3926 addResponses = []
3927 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003928 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003929 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003930 name="setTestAddAll-" + str( i ),
3931 args=[ onosSetName, addAllValue ] )
3932 threads.append( t )
3933 t.start()
3934 for t in threads:
3935 t.join()
3936 addResponses.append( t.result )
3937
3938 # main.TRUE = successfully changed the set
3939 # main.FALSE = action resulted in no change in set
3940 # main.ERROR - Some error in executing the function
3941 addAllResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003942 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003943 if addResponses[ i ] == main.TRUE:
3944 # All is well
3945 pass
3946 elif addResponses[ i ] == main.FALSE:
3947 # Already in set, probably fine
3948 pass
3949 elif addResponses[ i ] == main.ERROR:
3950 # Error in execution
3951 addAllResults = main.FALSE
3952 else:
3953 # unexpected result
3954 addAllResults = main.FALSE
3955 if addAllResults != main.TRUE:
3956 main.log.error( "Error executing set addAll" )
3957
3958 # Check if set is still correct
3959 size = len( onosSet )
3960 getResponses = []
3961 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003962 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003963 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003964 name="setTestGet-" + str( i ),
3965 args=[ onosSetName ] )
3966 threads.append( t )
3967 t.start()
3968 for t in threads:
3969 t.join()
3970 getResponses.append( t.result )
3971 getResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07003972 for i in range( len( main.activeNodes ) ):
3973 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003974 if isinstance( getResponses[ i ], list):
3975 current = set( getResponses[ i ] )
3976 if len( current ) == len( getResponses[ i ] ):
3977 # no repeats
3978 if onosSet != current:
Jon Halla440e872016-03-31 15:15:50 -07003979 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003980 " has incorrect view" +
3981 " of set " + onosSetName + ":\n" +
3982 str( getResponses[ i ] ) )
3983 main.log.debug( "Expected: " + str( onosSet ) )
3984 main.log.debug( "Actual: " + str( current ) )
3985 getResults = main.FALSE
3986 else:
3987 # error, set is not a set
Jon Halla440e872016-03-31 15:15:50 -07003988 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003989 " has repeat elements in" +
3990 " set " + onosSetName + ":\n" +
3991 str( getResponses[ i ] ) )
3992 getResults = main.FALSE
3993 elif getResponses[ i ] == main.ERROR:
3994 getResults = main.FALSE
3995 sizeResponses = []
3996 threads = []
Jon Halla440e872016-03-31 15:15:50 -07003997 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003998 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003999 name="setTestSize-" + str( i ),
4000 args=[ onosSetName ] )
4001 threads.append( t )
4002 t.start()
4003 for t in threads:
4004 t.join()
4005 sizeResponses.append( t.result )
4006 sizeResults = main.TRUE
Jon Halla440e872016-03-31 15:15:50 -07004007 for i in range( len( main.activeNodes ) ):
4008 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004009 if size != sizeResponses[ i ]:
4010 sizeResults = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07004011 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004012 " expected a size of " + str( size ) +
4013 " for set " + onosSetName +
4014 " but got " + str( sizeResponses[ i ] ) )
4015 addAllResults = addAllResults and getResults and sizeResults
4016 utilities.assert_equals( expect=main.TRUE,
4017 actual=addAllResults,
4018 onpass="Set addAll correct",
4019 onfail="Set addAll was incorrect" )
4020
4021 main.step( "Distributed Set retain()" )
4022 onosSet.intersection_update( retainValue.split() )
4023 retainResponses = []
4024 threads = []
Jon Halla440e872016-03-31 15:15:50 -07004025 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004026 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004027 name="setTestRetain-" + str( i ),
4028 args=[ onosSetName, retainValue ],
4029 kwargs={ "retain": True } )
4030 threads.append( t )
4031 t.start()
4032 for t in threads:
4033 t.join()
4034 retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # No change; the set may already equal the retained subset
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

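        # As with the other set operations, re-read the set contents and size
        # from every active node to confirm the cluster-wide view after retain()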
        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

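        # The transactional map checks write all keys from a single node and
        # then read every key back from every active node, so a passing result
        # means the committed values are visible across the whole cluster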
        # Transactional maps
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

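        # Read each key ( "Key1" .. "Key100" ) back from every active node in
        # parallel; every returned value must equal tMapValue for the step to pass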
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for response in getResponses:
                if response != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )