"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAkillNodes:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )

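        # Commenting out the 'respawn' directive in onos.conf prevents the init
        # system from automatically restarting ONOS, so nodes killed later in
        # this test stay down until the test explicitly recovers them.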
        main.step( "Make sure ONOS service doesn't automatically respawn" )
        handle = main.ONOSbench.handle
        handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
        handle.expect( "\$" )  # $ from the command
        handle.expect( "\$" )  # $ from the prompt

        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAkillNodes"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Clean up ONOS service changes" )
        handle.sendline( "git checkout -- tools/package/init/onos.conf" )
        handle.expect( "\$" )

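        # Poll the cluster with utilities.retry: main.HA.nodesCheck is retried
        # up to 5 times, giving the nodes time to form a healthy cluster before
        # the startup check is declared a failure.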
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

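        # Any applications listed under 'apps' in the params file (comma
        # separated) are activated on one node, then each app's status is
        # checked after a short wait for the activation to take effect.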
        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

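        # Verify the assignment: each switch's controller list from ovs-vsctl
        # should contain a "tcp:<ip>" entry for every ONOS node.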
        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
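        # Hosts h8-h17 are paired with h18-h27; the MAC addresses are built
        # from the host number, and each host-to-host intent is submitted via a
        # different active ONOS node in round-robin order.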
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

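        # Poll every active node (up to 100 times, one second apart) until each
        # node reports the full set of submitted intents in the INSTALLED
        # state; this measures how long anti-entropy gossip takes to converge.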
        main.step( "Intent Anti-Entropy dispersion" )
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
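        # The allowed convergence time is one gossip period (from the params
        # file) per active node; a longer gossip time fails the assertion
        # below.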
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[0] ]
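        # Poll the intents up to 40 times, one second apart, and only move on
        # to the ping test once every intent reports the INSTALLED state.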
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        main.log.debug( onosCli.flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

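        # The raw mastership output from each node is compared directly; any
        # difference between nodes means the cluster disagrees on switch roles.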
        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...         ...         ...
            # ...         ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001447 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001448 name="flows-" + str( i ),
1449 args=[],
1450 kwargs={ 'jsonFormat': True } )
1451 threads.append( t )
1452 t.start()
1453
1454 # NOTE: Flows command can take some time to run
1455        time.sleep( 30 )
1456 for t in threads:
1457 t.join()
1458 result = t.result
1459 ONOSFlows.append( result )
1460
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001461 for i in range( len( ONOSFlows ) ):
1462 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001463 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1464 main.log.error( "Error in getting ONOS" + num + " flows" )
1465 main.log.warn( "ONOS" + num + " flows response: " +
1466 repr( ONOSFlows[ i ] ) )
1467 flowsResults = False
1468 ONOSFlowsJson.append( None )
1469 else:
1470 try:
1471 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1472 except ( ValueError, TypeError ):
1473 # FIXME: change this to log.error?
1474 main.log.exception( "Error in parsing ONOS" + num +
1475 " response as json." )
1476 main.log.error( repr( ONOSFlows[ i ] ) )
1477 ONOSFlowsJson.append( None )
1478 flowsResults = False
1479 utilities.assert_equals(
1480 expect=True,
1481 actual=flowsResults,
1482 onpass="No error in reading flows output",
1483 onfail="Error in reading flows from ONOS" )
1484
1485 main.step( "Check for consistency in Flows from each controller" )
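        # NOTE: Only the number of flows per node is compared here; the
        #       switch OF tables are snapshotted below and diffed in CASE7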
1486 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1487 if all( tmp ):
1488 main.log.info( "Flow count is consistent across all ONOS nodes" )
1489 else:
1490 consistentFlows = False
1491 utilities.assert_equals(
1492 expect=True,
1493 actual=consistentFlows,
1494 onpass="The flow count is consistent across all ONOS nodes",
1495 onfail="ONOS nodes have different flow counts" )
1496
1497 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001498 for i in range( len( ONOSFlows ) ):
1499 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001500 try:
1501 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001502 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001503 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1504 indent=4, separators=( ',', ': ' ) ) )
1505 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001506 main.log.warn( "ONOS" + node + " flows: " +
1507 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001508 elif flowsResults and consistentFlows:
1509 flowCheck = main.TRUE
1510 flowState = ONOSFlows[ 0 ]
1511
1512 main.step( "Get the OF Table entries" )
1513 global flows
1514 flows = []
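        # Snapshot the OpenFlow tables of all 28 switches so CASE7 can
        # compare them against the post-failure state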
1515 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001516 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001517 if flowCheck == main.FALSE:
1518 for table in flows:
1519 main.log.warn( table )
1520 # TODO: Compare switch flow tables with ONOS flow tables
1521
1522 main.step( "Start continuous pings" )
1523 main.Mininet2.pingLong(
1524 src=main.params[ 'PING' ][ 'source1' ],
1525 target=main.params[ 'PING' ][ 'target1' ],
1526 pingTime=500 )
1527 main.Mininet2.pingLong(
1528 src=main.params[ 'PING' ][ 'source2' ],
1529 target=main.params[ 'PING' ][ 'target2' ],
1530 pingTime=500 )
1531 main.Mininet2.pingLong(
1532 src=main.params[ 'PING' ][ 'source3' ],
1533 target=main.params[ 'PING' ][ 'target3' ],
1534 pingTime=500 )
1535 main.Mininet2.pingLong(
1536 src=main.params[ 'PING' ][ 'source4' ],
1537 target=main.params[ 'PING' ][ 'target4' ],
1538 pingTime=500 )
1539 main.Mininet2.pingLong(
1540 src=main.params[ 'PING' ][ 'source5' ],
1541 target=main.params[ 'PING' ][ 'target5' ],
1542 pingTime=500 )
1543 main.Mininet2.pingLong(
1544 src=main.params[ 'PING' ][ 'source6' ],
1545 target=main.params[ 'PING' ][ 'target6' ],
1546 pingTime=500 )
1547 main.Mininet2.pingLong(
1548 src=main.params[ 'PING' ][ 'source7' ],
1549 target=main.params[ 'PING' ][ 'target7' ],
1550 pingTime=500 )
1551 main.Mininet2.pingLong(
1552 src=main.params[ 'PING' ][ 'source8' ],
1553 target=main.params[ 'PING' ][ 'target8' ],
1554 pingTime=500 )
1555 main.Mininet2.pingLong(
1556 src=main.params[ 'PING' ][ 'source9' ],
1557 target=main.params[ 'PING' ][ 'target9' ],
1558 pingTime=500 )
1559 main.Mininet2.pingLong(
1560 src=main.params[ 'PING' ][ 'source10' ],
1561 target=main.params[ 'PING' ][ 'target10' ],
1562 pingTime=500 )
1563
1564 main.step( "Collecting topology information from ONOS" )
1565 devices = []
1566 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001567 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001568 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001569 name="devices-" + str( i ),
1570 args=[ ] )
1571 threads.append( t )
1572 t.start()
1573
1574 for t in threads:
1575 t.join()
1576 devices.append( t.result )
1577 hosts = []
1578 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001579 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001580 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001581 name="hosts-" + str( i ),
1582 args=[ ] )
1583 threads.append( t )
1584 t.start()
1585
1586 for t in threads:
1587 t.join()
1588 try:
1589 hosts.append( json.loads( t.result ) )
1590 except ( ValueError, TypeError ):
1591 # FIXME: better handling of this, print which node
1592 # Maybe use thread name?
1593 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001594 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001595 hosts.append( None )
1596
1597 ports = []
1598 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001599 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001600 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001601 name="ports-" + str( i ),
1602 args=[ ] )
1603 threads.append( t )
1604 t.start()
1605
1606 for t in threads:
1607 t.join()
1608 ports.append( t.result )
1609 links = []
1610 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001611 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001612 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001613 name="links-" + str( i ),
1614 args=[ ] )
1615 threads.append( t )
1616 t.start()
1617
1618 for t in threads:
1619 t.join()
1620 links.append( t.result )
1621 clusters = []
1622 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001623 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001624 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001625 name="clusters-" + str( i ),
1626 args=[ ] )
1627 threads.append( t )
1628 t.start()
1629
1630 for t in threads:
1631 t.join()
1632 clusters.append( t.result )
1633 # Compare json objects for hosts and dataplane clusters
1634
1635 # hosts
1636 main.step( "Host view is consistent across ONOS nodes" )
1637 consistentHostsResult = main.TRUE
1638 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001639 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001640 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001641 if hosts[ controller ] == hosts[ 0 ]:
1642 continue
1643 else: # hosts not consistent
1644 main.log.error( "hosts from ONOS" +
1645 controllerStr +
1646                                     " are inconsistent with ONOS1" )
1647 main.log.warn( repr( hosts[ controller ] ) )
1648 consistentHostsResult = main.FALSE
1649
1650 else:
1651 main.log.error( "Error in getting ONOS hosts from ONOS" +
1652 controllerStr )
1653 consistentHostsResult = main.FALSE
1654 main.log.warn( "ONOS" + controllerStr +
1655 " hosts response: " +
1656 repr( hosts[ controller ] ) )
1657 utilities.assert_equals(
1658 expect=main.TRUE,
1659 actual=consistentHostsResult,
1660 onpass="Hosts view is consistent across all ONOS nodes",
1661 onfail="ONOS nodes have different views of hosts" )
1662
1663 main.step( "Each host has an IP address" )
1664 ipResult = main.TRUE
1665 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001666 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001667 if hosts[ controller ]:
1668 for host in hosts[ controller ]:
1669 if not host.get( 'ipAddresses', [ ] ):
1670 main.log.error( "Error with host ips on controller" +
1671 controllerStr + ": " + str( host ) )
1672 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001673 utilities.assert_equals(
1674 expect=main.TRUE,
1675 actual=ipResult,
1676            onpass="The IPs of the hosts are not empty",
1677            onfail="The IP of at least one host is missing" )
1678
1679 # Strongly connected clusters of devices
1680 main.step( "Cluster view is consistent across ONOS nodes" )
1681 consistentClustersResult = main.TRUE
1682 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001683 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001684 if "Error" not in clusters[ controller ]:
1685 if clusters[ controller ] == clusters[ 0 ]:
1686 continue
1687 else: # clusters not consistent
1688 main.log.error( "clusters from ONOS" + controllerStr +
1689                                     " are inconsistent with ONOS1" )
1690 consistentClustersResult = main.FALSE
1691
1692 else:
1693 main.log.error( "Error in getting dataplane clusters " +
1694 "from ONOS" + controllerStr )
1695 consistentClustersResult = main.FALSE
1696 main.log.warn( "ONOS" + controllerStr +
1697 " clusters response: " +
1698 repr( clusters[ controller ] ) )
1699 utilities.assert_equals(
1700 expect=main.TRUE,
1701 actual=consistentClustersResult,
1702 onpass="Clusters view is consistent across all ONOS nodes",
1703 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001704 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001705 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001706
Jon Hall5cf14d52015-07-16 12:15:19 -07001707 # there should always only be one cluster
1708 main.step( "Cluster view correct across ONOS nodes" )
1709 try:
1710 numClusters = len( json.loads( clusters[ 0 ] ) )
1711 except ( ValueError, TypeError ):
1712 main.log.exception( "Error parsing clusters[0]: " +
1713 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001714 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001715 clusterResults = main.FALSE
1716 if numClusters == 1:
1717 clusterResults = main.TRUE
1718 utilities.assert_equals(
1719 expect=1,
1720 actual=numClusters,
1721 onpass="ONOS shows 1 SCC",
1722 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1723
1724 main.step( "Comparing ONOS topology to MN" )
1725 devicesResults = main.TRUE
1726 linksResults = main.TRUE
1727 hostsResults = main.TRUE
1728 mnSwitches = main.Mininet1.getSwitches()
1729 mnLinks = main.Mininet1.getLinks()
1730 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001731        for controller in range( len( main.activeNodes ) ):
1732 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001733 if devices[ controller ] and ports[ controller ] and\
1734 "Error" not in devices[ controller ] and\
1735 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001736 currentDevicesResult = main.Mininet1.compareSwitches(
1737 mnSwitches,
1738 json.loads( devices[ controller ] ),
1739 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001740 else:
1741 currentDevicesResult = main.FALSE
1742 utilities.assert_equals( expect=main.TRUE,
1743 actual=currentDevicesResult,
1744 onpass="ONOS" + controllerStr +
1745 " Switches view is correct",
1746 onfail="ONOS" + controllerStr +
1747 " Switches view is incorrect" )
1748 if links[ controller ] and "Error" not in links[ controller ]:
1749 currentLinksResult = main.Mininet1.compareLinks(
1750 mnSwitches, mnLinks,
1751 json.loads( links[ controller ] ) )
1752 else:
1753 currentLinksResult = main.FALSE
1754 utilities.assert_equals( expect=main.TRUE,
1755 actual=currentLinksResult,
1756 onpass="ONOS" + controllerStr +
1757 " links view is correct",
1758 onfail="ONOS" + controllerStr +
1759 " links view is incorrect" )
1760
Jon Hall657cdf62015-12-17 14:40:51 -08001761 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001762 currentHostsResult = main.Mininet1.compareHosts(
1763 mnHosts,
1764 hosts[ controller ] )
1765 else:
1766 currentHostsResult = main.FALSE
1767 utilities.assert_equals( expect=main.TRUE,
1768 actual=currentHostsResult,
1769 onpass="ONOS" + controllerStr +
1770 " hosts exist in Mininet",
1771 onfail="ONOS" + controllerStr +
1772 " hosts don't match Mininet" )
1773
1774 devicesResults = devicesResults and currentDevicesResult
1775 linksResults = linksResults and currentLinksResult
1776 hostsResults = hostsResults and currentHostsResult
1777
1778 main.step( "Device information is correct" )
1779 utilities.assert_equals(
1780 expect=main.TRUE,
1781 actual=devicesResults,
1782 onpass="Device information is correct",
1783 onfail="Device information is incorrect" )
1784
1785 main.step( "Links are correct" )
1786 utilities.assert_equals(
1787 expect=main.TRUE,
1788 actual=linksResults,
1789            onpass="Links are correct",
1790 onfail="Links are incorrect" )
1791
1792 main.step( "Hosts are correct" )
1793 utilities.assert_equals(
1794 expect=main.TRUE,
1795 actual=hostsResults,
1796 onpass="Hosts are correct",
1797 onfail="Hosts are incorrect" )
1798
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001799 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001800 """
1801        The failure-inducing case: kill a minority of the ONOS nodes.
1802 """
Jon Halle1a3b752015-07-22 13:02:46 -07001803 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001804 assert main, "main not defined"
1805 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001806 assert main.CLIs, "main.CLIs not defined"
1807 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001808 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001809
1810 main.step( "Checking ONOS Logs for errors" )
1811 for node in main.nodes:
1812 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1813 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1814
Jon Hall3b489db2015-10-05 14:38:37 -07001815 n = len( main.nodes ) # Number of nodes
1816 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1817 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1818 if n > 3:
1819 main.kill.append( p - 1 )
1820 # NOTE: This only works for cluster sizes of 3,5, or 7.
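        # This kills node 0, plus node p-1 when the cluster has more than 3
        # nodes, i.e. 1 of 3 nodes or 2 of 5/7 nodes, so the killed set
        # always stays a strict minority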
1821
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001822 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001823 killResults = main.TRUE
1824 for i in main.kill:
1825 killResults = killResults and\
1826 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001827 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001828 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001829 onpass="ONOS nodes killed successfully",
1830 onfail="ONOS nodes NOT successfully killed" )
1831
1832 def CASE62( self, main ):
1833 """
1834        Bring the stopped nodes back up
1835 """
1836 import time
1837 assert main.numCtrls, "main.numCtrls not defined"
1838 assert main, "main not defined"
1839 assert utilities.assert_equals, "utilities.assert_equals not defined"
1840 assert main.CLIs, "main.CLIs not defined"
1841 assert main.nodes, "main.nodes not defined"
1842 assert main.kill, "main.kill not defined"
1843 main.case( "Restart minority of ONOS nodes" )
1844
1845 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1846 startResults = main.TRUE
1847 restartTime = time.time()
1848 for i in main.kill:
1849 startResults = startResults and\
1850 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1851 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1852 onpass="ONOS nodes started successfully",
1853 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001854
1855 main.step( "Checking if ONOS is up yet" )
1856 count = 0
1857 onosIsupResult = main.FALSE
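        # Poll each killed node with isup until all of them report up,
        # trying at most 10 times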
1858 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001859 onosIsupResult = main.TRUE
1860 for i in main.kill:
1861 onosIsupResult = onosIsupResult and\
1862 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001863 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001864 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1865 onpass="ONOS restarted successfully",
1866 onfail="ONOS restart NOT successful" )
1867
Jon Halle1a3b752015-07-22 13:02:46 -07001868        main.step( "Restarting ONOS CLI sessions" )
Jon Hall3b489db2015-10-05 14:38:37 -07001869 cliResults = main.TRUE
1870 for i in main.kill:
1871 cliResults = cliResults and\
1872 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001873 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001874 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1875 onpass="ONOS cli restarted",
1876 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001877 main.activeNodes.sort()
1878 try:
1879 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1880                "List of active nodes has duplicates; this likely indicates something was run out of order"
1881 except AssertionError:
1882 main.log.exception( "" )
1883 main.cleanup()
1884 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001885
1886        # Grab the time of restart so we can check how long the gossip
1887 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001888 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001889 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001890        # TODO: Make this configurable. Also, we are breaking the above timer
1891 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001892 node = main.activeNodes[0]
1893 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1894 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1895 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001896
Jon Halla440e872016-03-31 15:15:50 -07001897 main.step( "Rerun for election on the node(s) that were killed" )
1898 runResults = main.TRUE
1899 for i in main.kill:
1900 runResults = runResults and\
1901 main.CLIs[i].electionTestRun()
1902 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1903 onpass="ONOS nodes reran for election topic",
1904                                 onfail="Error rerunning for election" )
1905
Jon Hall5cf14d52015-07-16 12:15:19 -07001906 def CASE7( self, main ):
1907 """
1908 Check state after ONOS failure
1909 """
1910 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001911 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001912 assert main, "main not defined"
1913 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001914 assert main.CLIs, "main.CLIs not defined"
1915 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001916 try:
1917 main.kill
1918 except AttributeError:
1919 main.kill = []
1920
Jon Hall5cf14d52015-07-16 12:15:19 -07001921 main.case( "Running ONOS Constant State Tests" )
1922
1923 main.step( "Check that each switch has a master" )
1924 # Assert that each device has a master
1925 rolesNotNull = main.TRUE
1926 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001927 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001928 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001929 name="rolesNotNull-" + str( i ),
1930 args=[ ] )
1931 threads.append( t )
1932 t.start()
1933
1934 for t in threads:
1935 t.join()
1936 rolesNotNull = rolesNotNull and t.result
1937 utilities.assert_equals(
1938 expect=main.TRUE,
1939 actual=rolesNotNull,
1940 onpass="Each device has a master",
1941 onfail="Some devices don't have a master assigned" )
1942
1943 main.step( "Read device roles from ONOS" )
1944 ONOSMastership = []
Jon Halla440e872016-03-31 15:15:50 -07001945 mastershipCheck = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001946 consistentMastership = True
1947 rolesResults = True
1948 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001949 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001950 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001951 name="roles-" + str( i ),
1952 args=[] )
1953 threads.append( t )
1954 t.start()
1955
1956 for t in threads:
1957 t.join()
1958 ONOSMastership.append( t.result )
1959
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001960 for i in range( len( ONOSMastership ) ):
1961 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001962 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001963 main.log.error( "Error in getting ONOS" + node + " roles" )
1964 main.log.warn( "ONOS" + node + " mastership response: " +
1965 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001966 rolesResults = False
1967 utilities.assert_equals(
1968 expect=True,
1969 actual=rolesResults,
1970 onpass="No error in reading roles output",
1971 onfail="Error in reading roles from ONOS" )
1972
1973 main.step( "Check for consistency in roles from each controller" )
1974 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1975 main.log.info(
1976 "Switch roles are consistent across all ONOS nodes" )
1977 else:
1978 consistentMastership = False
1979 utilities.assert_equals(
1980 expect=True,
1981 actual=consistentMastership,
1982 onpass="Switch roles are consistent across all ONOS nodes",
1983 onfail="ONOS nodes have different views of switch roles" )
1984
1985 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001986 for i in range( len( ONOSMastership ) ):
1987 node = str( main.activeNodes[i] + 1 )
1988                main.log.warn( "ONOS" + node + " roles: " +
1989 json.dumps( json.loads( ONOSMastership[ i ] ),
1990 sort_keys=True,
1991 indent=4,
1992 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001993 elif rolesResults and consistentMastership:
1994 mastershipCheck = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07001995
1996 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07001997
1998 main.step( "Get the intents and compare across all nodes" )
1999 ONOSIntents = []
2000 intentCheck = main.FALSE
2001 consistentIntents = True
2002 intentsResults = True
2003 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002004 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002005 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07002006 name="intents-" + str( i ),
2007 args=[],
2008 kwargs={ 'jsonFormat': True } )
2009 threads.append( t )
2010 t.start()
2011
2012 for t in threads:
2013 t.join()
2014 ONOSIntents.append( t.result )
2015
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002016        for i in range( len( ONOSIntents ) ):
2017 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002018 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002019 main.log.error( "Error in getting ONOS" + node + " intents" )
2020 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07002021 repr( ONOSIntents[ i ] ) )
2022 intentsResults = False
2023 utilities.assert_equals(
2024 expect=True,
2025 actual=intentsResults,
2026 onpass="No error in reading intents output",
2027 onfail="Error in reading intents from ONOS" )
2028
2029 main.step( "Check for consistency in Intents from each controller" )
2030 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2031 main.log.info( "Intents are consistent across all ONOS " +
2032 "nodes" )
2033 else:
2034 consistentIntents = False
2035
2036 # Try to make it easy to figure out what is happening
2037 #
2038 # Intent ONOS1 ONOS2 ...
2039 # 0x01 INSTALLED INSTALLING
2040 # ... ... ...
2041 # ... ... ...
2042 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002043 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07002044 title += " " * 10 + "ONOS" + str( n + 1 )
2045 main.log.warn( title )
2046 # get all intent keys in the cluster
2047 keys = []
2048 for nodeStr in ONOSIntents:
2049 node = json.loads( nodeStr )
2050 for intent in node:
2051 keys.append( intent.get( 'id' ) )
2052 keys = set( keys )
2053 for key in keys:
2054 row = "%-13s" % key
2055 for nodeStr in ONOSIntents:
2056 node = json.loads( nodeStr )
2057 for intent in node:
2058 if intent.get( 'id' ) == key:
2059 row += "%-15s" % intent.get( 'state' )
2060 main.log.warn( row )
2061 # End table view
2062
2063 utilities.assert_equals(
2064 expect=True,
2065 actual=consistentIntents,
2066 onpass="Intents are consistent across all ONOS nodes",
2067 onfail="ONOS nodes have different views of intents" )
2068 intentStates = []
2069 for node in ONOSIntents: # Iter through ONOS nodes
2070 nodeStates = []
2071 # Iter through intents of a node
2072 try:
2073 for intent in json.loads( node ):
2074 nodeStates.append( intent[ 'state' ] )
2075 except ( ValueError, TypeError ):
2076 main.log.exception( "Error in parsing intents" )
2077 main.log.error( repr( node ) )
2078 intentStates.append( nodeStates )
2079 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2080 main.log.info( dict( out ) )
2081
2082 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002083 for i in range( len( main.activeNodes ) ):
2084 node = str( main.activeNodes[i] + 1 )
2085 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07002086 main.log.warn( json.dumps(
2087 json.loads( ONOSIntents[ i ] ),
2088 sort_keys=True,
2089 indent=4,
2090 separators=( ',', ': ' ) ) )
2091 elif intentsResults and consistentIntents:
2092 intentCheck = main.TRUE
2093
2094 # NOTE: Store has no durability, so intents are lost across system
2095 # restarts
2096 main.step( "Compare current intents with intents before the failure" )
2097 # NOTE: this requires case 5 to pass for intentState to be set.
2098 # maybe we should stop the test if that fails?
2099 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002100 try:
2101 intentState
2102 except NameError:
2103 main.log.warn( "No previous intent state was saved" )
2104 else:
2105 if intentState and intentState == ONOSIntents[ 0 ]:
2106 sameIntents = main.TRUE
2107 main.log.info( "Intents are consistent with before failure" )
2108 # TODO: possibly the states have changed? we may need to figure out
2109 # what the acceptable states are
2110 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2111 sameIntents = main.TRUE
2112 try:
2113 before = json.loads( intentState )
2114 after = json.loads( ONOSIntents[ 0 ] )
2115 for intent in before:
2116 if intent not in after:
2117 sameIntents = main.FALSE
2118 main.log.debug( "Intent is not currently in ONOS " +
2119 "(at least in the same form):" )
2120 main.log.debug( json.dumps( intent ) )
2121 except ( ValueError, TypeError ):
2122 main.log.exception( "Exception printing intents" )
2123 main.log.debug( repr( ONOSIntents[0] ) )
2124 main.log.debug( repr( intentState ) )
2125 if sameIntents == main.FALSE:
2126 try:
2127 main.log.debug( "ONOS intents before: " )
2128 main.log.debug( json.dumps( json.loads( intentState ),
2129 sort_keys=True, indent=4,
2130 separators=( ',', ': ' ) ) )
2131 main.log.debug( "Current ONOS intents: " )
2132 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2133 sort_keys=True, indent=4,
2134 separators=( ',', ': ' ) ) )
2135 except ( ValueError, TypeError ):
2136 main.log.exception( "Exception printing intents" )
2137 main.log.debug( repr( ONOSIntents[0] ) )
2138 main.log.debug( repr( intentState ) )
2139 utilities.assert_equals(
2140 expect=main.TRUE,
2141 actual=sameIntents,
2142 onpass="Intents are consistent with before failure",
2143 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002144 intentCheck = intentCheck and sameIntents
2145
2146 main.step( "Get the OF Table entries and compare to before " +
2147 "component failure" )
2148 FlowTables = main.TRUE
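        # Compare each switch's current OF table with the snapshot taken
        # before the nodes were killed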
Jon Hall5cf14d52015-07-16 12:15:19 -07002149 for i in range( 28 ):
2150 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002151 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002152 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2153 FlowTables = FlowTables and curSwitch
2154 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002155 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002156 utilities.assert_equals(
2157 expect=main.TRUE,
2158 actual=FlowTables,
2159 onpass="No changes were found in the flow tables",
2160 onfail="Changes were found in the flow tables" )
2161
2162 main.Mininet2.pingLongKill()
2163 '''
2164 main.step( "Check the continuous pings to ensure that no packets " +
2165 "were dropped during component failure" )
2166 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2167 main.params[ 'TESTONIP' ] )
2168 LossInPings = main.FALSE
2169 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2170 for i in range( 8, 18 ):
2171 main.log.info(
2172 "Checking for a loss in pings along flow from s" +
2173 str( i ) )
2174 LossInPings = main.Mininet2.checkForLoss(
2175 "/tmp/ping.h" +
2176 str( i ) ) or LossInPings
2177 if LossInPings == main.TRUE:
2178 main.log.info( "Loss in ping detected" )
2179 elif LossInPings == main.ERROR:
2180 main.log.info( "There are multiple mininet process running" )
2181 elif LossInPings == main.FALSE:
2182 main.log.info( "No Loss in the pings" )
2183 main.log.info( "No loss of dataplane connectivity" )
2184 utilities.assert_equals(
2185 expect=main.FALSE,
2186 actual=LossInPings,
2187 onpass="No Loss of connectivity",
2188 onfail="Loss of dataplane connectivity detected" )
2189 '''
2190
2191 main.step( "Leadership Election is still functional" )
2192 # Test of LeadershipElection
2193 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002194
Jon Hall3b489db2015-10-05 14:38:37 -07002195 restarted = []
2196 for i in main.kill:
2197 restarted.append( main.nodes[i].ip_address )
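        # The election-app leader must not be one of the killed and
        # restarted nodes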
Jon Hall5cf14d52015-07-16 12:15:19 -07002198 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002199
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002200 for i in main.activeNodes:
2201 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002202 leaderN = cli.electionTestLeader()
2203 leaderList.append( leaderN )
2204 if leaderN == main.FALSE:
2205 # error in response
2206 main.log.error( "Something is wrong with " +
2207 "electionTestLeader function, check the" +
2208 " error logs" )
2209 leaderResult = main.FALSE
2210 elif leaderN is None:
2211 main.log.error( cli.name +
2212 " shows no leader for the election-app was" +
2213 " elected after the old one died" )
2214 leaderResult = main.FALSE
2215 elif leaderN in restarted:
2216 main.log.error( cli.name + " shows " + str( leaderN ) +
2217 " as leader for the election-app, but it " +
2218 "was restarted" )
2219 leaderResult = main.FALSE
2220 if len( set( leaderList ) ) != 1:
2221 leaderResult = main.FALSE
2222 main.log.error(
2223 "Inconsistent view of leader for the election test app" )
2224 # TODO: print the list
2225 utilities.assert_equals(
2226 expect=main.TRUE,
2227 actual=leaderResult,
2228 onpass="Leadership election passed",
2229 onfail="Something went wrong with Leadership election" )
2230
2231 def CASE8( self, main ):
2232 """
2233 Compare topo
2234 """
2235 import json
2236 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002237 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002238 assert main, "main not defined"
2239 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002240 assert main.CLIs, "main.CLIs not defined"
2241 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002242
2243 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002244 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002245 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002246 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002247        topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002248 elapsed = 0
2249 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002250 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002251 startTime = time.time()
2252 # Give time for Gossip to work
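        # Keep polling while the topologies don't match, for at least
        # 60 seconds and at least 3 attempts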
Jon Halle9b1fa32015-12-08 15:32:21 -08002253 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002254 devicesResults = main.TRUE
2255 linksResults = main.TRUE
2256 hostsResults = main.TRUE
2257 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002258 count += 1
2259 cliStart = time.time()
2260 devices = []
2261 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002262 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002263 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002264 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002265 args=[ main.CLIs[i].devices, [ None ] ],
2266 kwargs= { 'sleep': 5, 'attempts': 5,
2267 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002268 threads.append( t )
2269 t.start()
2270
2271 for t in threads:
2272 t.join()
2273 devices.append( t.result )
2274 hosts = []
2275 ipResult = main.TRUE
2276 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002277 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002278 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002279 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002280 args=[ main.CLIs[i].hosts, [ None ] ],
2281 kwargs= { 'sleep': 5, 'attempts': 5,
2282 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002283 threads.append( t )
2284 t.start()
2285
2286 for t in threads:
2287 t.join()
2288 try:
2289 hosts.append( json.loads( t.result ) )
2290 except ( ValueError, TypeError ):
2291 main.log.exception( "Error parsing hosts results" )
2292 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002293 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002294 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002295 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002296 if hosts[ controller ]:
2297 for host in hosts[ controller ]:
2298 if host is None or host.get( 'ipAddresses', [] ) == []:
2299 main.log.error(
2300 "Error with host ipAddresses on controller" +
2301 controllerStr + ": " + str( host ) )
2302 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002303 ports = []
2304 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002305 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002306 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002307 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002308 args=[ main.CLIs[i].ports, [ None ] ],
2309 kwargs= { 'sleep': 5, 'attempts': 5,
2310 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002311 threads.append( t )
2312 t.start()
2313
2314 for t in threads:
2315 t.join()
2316 ports.append( t.result )
2317 links = []
2318 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002319 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002320 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002321 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002322 args=[ main.CLIs[i].links, [ None ] ],
2323 kwargs= { 'sleep': 5, 'attempts': 5,
2324 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002325 threads.append( t )
2326 t.start()
2327
2328 for t in threads:
2329 t.join()
2330 links.append( t.result )
2331 clusters = []
2332 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002333 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002334 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002335 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002336 args=[ main.CLIs[i].clusters, [ None ] ],
2337 kwargs= { 'sleep': 5, 'attempts': 5,
2338 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002339 threads.append( t )
2340 t.start()
2341
2342 for t in threads:
2343 t.join()
2344 clusters.append( t.result )
2345
2346 elapsed = time.time() - startTime
2347 cliTime = time.time() - cliStart
2348 print "Elapsed time: " + str( elapsed )
2349 print "CLI time: " + str( cliTime )
2350
Jon Hall6e709752016-02-01 13:38:46 -08002351 if all( e is None for e in devices ) and\
2352 all( e is None for e in hosts ) and\
2353 all( e is None for e in ports ) and\
2354 all( e is None for e in links ) and\
2355 all( e is None for e in clusters ):
2356 topoFailMsg = "Could not get topology from ONOS"
2357 main.log.error( topoFailMsg )
2358 continue # Try again, No use trying to compare
2359
Jon Hall5cf14d52015-07-16 12:15:19 -07002360 mnSwitches = main.Mininet1.getSwitches()
2361 mnLinks = main.Mininet1.getLinks()
2362 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002363 for controller in range( len( main.activeNodes ) ):
2364 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002365 if devices[ controller ] and ports[ controller ] and\
2366 "Error" not in devices[ controller ] and\
2367 "Error" not in ports[ controller ]:
2368
Jon Hallc6793552016-01-19 14:18:37 -08002369 try:
2370 currentDevicesResult = main.Mininet1.compareSwitches(
2371 mnSwitches,
2372 json.loads( devices[ controller ] ),
2373 json.loads( ports[ controller ] ) )
2374 except ( TypeError, ValueError ) as e:
2375 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2376 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002377 else:
2378 currentDevicesResult = main.FALSE
2379 utilities.assert_equals( expect=main.TRUE,
2380 actual=currentDevicesResult,
2381 onpass="ONOS" + controllerStr +
2382 " Switches view is correct",
2383 onfail="ONOS" + controllerStr +
2384 " Switches view is incorrect" )
2385
2386 if links[ controller ] and "Error" not in links[ controller ]:
2387 currentLinksResult = main.Mininet1.compareLinks(
2388 mnSwitches, mnLinks,
2389 json.loads( links[ controller ] ) )
2390 else:
2391 currentLinksResult = main.FALSE
2392 utilities.assert_equals( expect=main.TRUE,
2393 actual=currentLinksResult,
2394 onpass="ONOS" + controllerStr +
2395 " links view is correct",
2396 onfail="ONOS" + controllerStr +
2397 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002398 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002399 currentHostsResult = main.Mininet1.compareHosts(
2400 mnHosts,
2401 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002402 elif hosts[ controller ] == []:
2403 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002404 else:
2405 currentHostsResult = main.FALSE
2406 utilities.assert_equals( expect=main.TRUE,
2407 actual=currentHostsResult,
2408 onpass="ONOS" + controllerStr +
2409 " hosts exist in Mininet",
2410 onfail="ONOS" + controllerStr +
2411 " hosts don't match Mininet" )
2412 # CHECKING HOST ATTACHMENT POINTS
2413 hostAttachment = True
2414 zeroHosts = False
2415 # FIXME: topo-HA/obelisk specific mappings:
2416 # key is mac and value is dpid
2417 mappings = {}
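                # Map each host MAC ( 00:00:00:00:00:NN ) to the dpid of the
                # switch it should attach to in the obelisk topology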
2418 for i in range( 1, 29 ): # hosts 1 through 28
2419 # set up correct variables:
2420 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2421 if i == 1:
2422 deviceId = "1000".zfill(16)
2423 elif i == 2:
2424 deviceId = "2000".zfill(16)
2425 elif i == 3:
2426 deviceId = "3000".zfill(16)
2427 elif i == 4:
2428 deviceId = "3004".zfill(16)
2429 elif i == 5:
2430 deviceId = "5000".zfill(16)
2431 elif i == 6:
2432 deviceId = "6000".zfill(16)
2433 elif i == 7:
2434 deviceId = "6007".zfill(16)
2435 elif i >= 8 and i <= 17:
2436 dpid = '3' + str( i ).zfill( 3 )
2437 deviceId = dpid.zfill(16)
2438 elif i >= 18 and i <= 27:
2439 dpid = '6' + str( i ).zfill( 3 )
2440 deviceId = dpid.zfill(16)
2441 elif i == 28:
2442 deviceId = "2800".zfill(16)
2443 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002444 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002445 if hosts[ controller ] == []:
2446 main.log.warn( "There are no hosts discovered" )
2447 zeroHosts = True
2448 else:
2449 for host in hosts[ controller ]:
2450 mac = None
2451 location = None
2452 device = None
2453 port = None
2454 try:
2455 mac = host.get( 'mac' )
2456 assert mac, "mac field could not be found for this host object"
2457
2458 location = host.get( 'location' )
2459 assert location, "location field could not be found for this host object"
2460
2461 # Trim the protocol identifier off deviceId
2462 device = str( location.get( 'elementId' ) ).split(':')[1]
2463 assert device, "elementId field could not be found for this host location object"
2464
2465 port = location.get( 'port' )
2466 assert port, "port field could not be found for this host location object"
2467
2468 # Now check if this matches where they should be
2469 if mac and device and port:
2470 if str( port ) != "1":
2471 main.log.error( "The attachment port is incorrect for " +
2472 "host " + str( mac ) +
2473 ". Expected: 1 Actual: " + str( port) )
2474 hostAttachment = False
2475 if device != mappings[ str( mac ) ]:
2476 main.log.error( "The attachment device is incorrect for " +
2477 "host " + str( mac ) +
2478 ". Expected: " + mappings[ str( mac ) ] +
2479 " Actual: " + device )
2480 hostAttachment = False
2481 else:
2482 hostAttachment = False
2483 except AssertionError:
2484 main.log.exception( "Json object not as expected" )
2485 main.log.error( repr( host ) )
2486 hostAttachment = False
2487 else:
2488 main.log.error( "No hosts json output or \"Error\"" +
2489 " in output. hosts = " +
2490 repr( hosts[ controller ] ) )
2491 if zeroHosts is False:
2492 hostAttachment = True
2493
2494 # END CHECKING HOST ATTACHMENT POINTS
2495 devicesResults = devicesResults and currentDevicesResult
2496 linksResults = linksResults and currentLinksResult
2497 hostsResults = hostsResults and currentHostsResult
2498 hostAttachmentResults = hostAttachmentResults and\
2499 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002500 topoResult = ( devicesResults and linksResults
2501 and hostsResults and ipResult and
2502 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002503 utilities.assert_equals( expect=True,
2504 actual=topoResult,
2505 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002506 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002507 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002508
2509 # Compare json objects for hosts and dataplane clusters
2510
2511 # hosts
2512 main.step( "Hosts view is consistent across all ONOS nodes" )
2513 consistentHostsResult = main.TRUE
2514 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002515 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002516 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002517 if hosts[ controller ] == hosts[ 0 ]:
2518 continue
2519 else: # hosts not consistent
2520 main.log.error( "hosts from ONOS" + controllerStr +
2521                         " are inconsistent with ONOS1" )
2522 main.log.warn( repr( hosts[ controller ] ) )
2523 consistentHostsResult = main.FALSE
2524
2525 else:
2526 main.log.error( "Error in getting ONOS hosts from ONOS" +
2527 controllerStr )
2528 consistentHostsResult = main.FALSE
2529 main.log.warn( "ONOS" + controllerStr +
2530 " hosts response: " +
2531 repr( hosts[ controller ] ) )
2532 utilities.assert_equals(
2533 expect=main.TRUE,
2534 actual=consistentHostsResult,
2535 onpass="Hosts view is consistent across all ONOS nodes",
2536 onfail="ONOS nodes have different views of hosts" )
2537
2538 main.step( "Hosts information is correct" )
2539 hostsResults = hostsResults and ipResult
2540 utilities.assert_equals(
2541 expect=main.TRUE,
2542 actual=hostsResults,
2543 onpass="Host information is correct",
2544 onfail="Host information is incorrect" )
2545
2546 main.step( "Host attachment points to the network" )
2547 utilities.assert_equals(
2548 expect=True,
2549 actual=hostAttachmentResults,
2550 onpass="Hosts are correctly attached to the network",
2551 onfail="ONOS did not correctly attach hosts to the network" )
2552
2553 # Strongly connected clusters of devices
2554 main.step( "Clusters view is consistent across all ONOS nodes" )
2555 consistentClustersResult = main.TRUE
2556 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002557 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002558 if "Error" not in clusters[ controller ]:
2559 if clusters[ controller ] == clusters[ 0 ]:
2560 continue
2561 else: # clusters not consistent
2562 main.log.error( "clusters from ONOS" +
2563 controllerStr +
2564                                     " are inconsistent with ONOS1" )
2565 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002566 else:
2567 main.log.error( "Error in getting dataplane clusters " +
2568 "from ONOS" + controllerStr )
2569 consistentClustersResult = main.FALSE
2570 main.log.warn( "ONOS" + controllerStr +
2571 " clusters response: " +
2572 repr( clusters[ controller ] ) )
2573 utilities.assert_equals(
2574 expect=main.TRUE,
2575 actual=consistentClustersResult,
2576 onpass="Clusters view is consistent across all ONOS nodes",
2577 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002578 if not consistentClustersResult:
2579 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07002580
2581 main.step( "There is only one SCC" )
2582 # there should always only be one cluster
2583 try:
2584 numClusters = len( json.loads( clusters[ 0 ] ) )
2585 except ( ValueError, TypeError ):
2586 main.log.exception( "Error parsing clusters[0]: " +
2587 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002588 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002589 clusterResults = main.FALSE
2590 if numClusters == 1:
2591 clusterResults = main.TRUE
2592 utilities.assert_equals(
2593 expect=1,
2594 actual=numClusters,
2595 onpass="ONOS shows 1 SCC",
2596 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2597
2598 topoResult = ( devicesResults and linksResults
2599 and hostsResults and consistentHostsResult
2600 and consistentClustersResult and clusterResults
2601 and ipResult and hostAttachmentResults )
2602
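        # Also require that the topology converged within the first two
        # polling attempts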
2603 topoResult = topoResult and int( count <= 2 )
2604 note = "note it takes about " + str( int( cliTime ) ) + \
2605 " seconds for the test to make all the cli calls to fetch " +\
2606 "the topology from each ONOS instance"
2607 main.log.info(
2608            "Very rough estimate for topology discovery/convergence( " +
2609 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2610 str( count ) + " tries" )
2611
2612 main.step( "Device information is correct" )
2613 utilities.assert_equals(
2614 expect=main.TRUE,
2615 actual=devicesResults,
2616 onpass="Device information is correct",
2617 onfail="Device information is incorrect" )
2618
2619 main.step( "Links are correct" )
2620 utilities.assert_equals(
2621 expect=main.TRUE,
2622 actual=linksResults,
2623            onpass="Links are correct",
2624 onfail="Links are incorrect" )
2625
Jon Halla440e872016-03-31 15:15:50 -07002626 main.step( "Hosts are correct" )
2627 utilities.assert_equals(
2628 expect=main.TRUE,
2629 actual=hostsResults,
2630 onpass="Hosts are correct",
2631 onfail="Hosts are incorrect" )
2632
Jon Hall5cf14d52015-07-16 12:15:19 -07002633 # FIXME: move this to an ONOS state case
2634 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002635 nodeResults = utilities.retry( main.HA.nodesCheck,
2636 False,
2637 args=[main.activeNodes],
2638 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002639
Jon Hall41d39f12016-04-11 22:54:35 -07002640 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002641 onpass="Nodes check successful",
2642 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002643 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002644 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002645 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002646 main.CLIs[i].name,
2647 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002648
2649 def CASE9( self, main ):
2650 """
2651 Link s3-s28 down
2652 """
2653 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002654 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002655 assert main, "main not defined"
2656 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002657 assert main.CLIs, "main.CLIs not defined"
2658 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002659 # NOTE: You should probably run a topology check after this
2660
2661 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2662
2663 description = "Turn off a link to ensure that Link Discovery " +\
2664 "is working properly"
2665 main.case( description )
2666
2667 main.step( "Kill Link between s3 and s28" )
2668 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2669 main.log.info( "Waiting " + str( linkSleep ) +
2670 " seconds for link down to be discovered" )
2671 time.sleep( linkSleep )
2672 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2673 onpass="Link down successful",
2674 onfail="Failed to bring link down" )
2675 # TODO do some sort of check here
2676
2677 def CASE10( self, main ):
2678 """
2679 Link s3-s28 up
2680 """
2681 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002682 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002683 assert main, "main not defined"
2684 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002685 assert main.CLIs, "main.CLIs not defined"
2686 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002687 # NOTE: You should probably run a topology check after this
2688
2689 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2690
2691 description = "Restore a link to ensure that Link Discovery is " + \
2692 "working properly"
2693 main.case( description )
2694
2695 main.step( "Bring link between s3 and s28 back up" )
2696 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2697 main.log.info( "Waiting " + str( linkSleep ) +
2698 " seconds for link up to be discovered" )
2699 time.sleep( linkSleep )
2700 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2701 onpass="Link up successful",
2702 onfail="Failed to bring link up" )
2703 # TODO do some sort of check here
2704
2705 def CASE11( self, main ):
2706 """
2707 Switch Down
2708 """
2709 # NOTE: You should probably run a topology check after this
2710 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002711 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 assert main, "main not defined"
2713 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002714 assert main.CLIs, "main.CLIs not defined"
2715 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002716
2717 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2718
2719 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002720 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002721 main.case( description )
2722 switch = main.params[ 'kill' ][ 'switch' ]
2723 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2724
2725 # TODO: Make this switch parameterizable
2726 main.step( "Kill " + switch )
2727 main.log.info( "Deleting " + switch )
2728 main.Mininet1.delSwitch( switch )
2729 main.log.info( "Waiting " + str( switchSleep ) +
2730 " seconds for switch down to be discovered" )
2731 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002732 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002733 # Peek at the deleted switch
2734 main.log.warn( str( device ) )
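        # The deleted switch should still appear in the ONOS device list, but with
        # 'available' set to False; that is what the check below verifies.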
2735 result = main.FALSE
2736 if device and device[ 'available' ] is False:
2737 result = main.TRUE
2738 utilities.assert_equals( expect=main.TRUE, actual=result,
2739 onpass="Kill switch successful",
2740 onfail="Failed to kill switch?" )
2741
2742 def CASE12( self, main ):
2743 """
2744 Switch Up
2745 """
2746 # NOTE: You should probably run a topology check after this
2747 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002748 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002749 assert main, "main not defined"
2750 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002751 assert main.CLIs, "main.CLIs not defined"
2752 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002753 assert ONOS1Port, "ONOS1Port not defined"
2754 assert ONOS2Port, "ONOS2Port not defined"
2755 assert ONOS3Port, "ONOS3Port not defined"
2756 assert ONOS4Port, "ONOS4Port not defined"
2757 assert ONOS5Port, "ONOS5Port not defined"
2758 assert ONOS6Port, "ONOS6Port not defined"
2759 assert ONOS7Port, "ONOS7Port not defined"
2760
2761 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2762 switch = main.params[ 'kill' ][ 'switch' ]
2763 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2764 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002765 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002766 description = "Adding a switch to ensure it is discovered correctly"
2767 main.case( description )
2768
2769 main.step( "Add back " + switch )
2770 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2771 for peer in links:
2772 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002773 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002774 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2775 main.log.info( "Waiting " + str( switchSleep ) +
2776 " seconds for switch up to be discovered" )
2777 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002778 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002779 # Peek at the deleted switch
2780 main.log.warn( str( device ) )
2781 result = main.FALSE
2782 if device and device[ 'available' ]:
2783 result = main.TRUE
2784 utilities.assert_equals( expect=main.TRUE, actual=result,
2785 onpass="add switch successful",
2786 onfail="Failed to add switch?" )
2787
2788 def CASE13( self, main ):
2789 """
2790 Clean up
2791 """
2792 import os
2793 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002794 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002795 assert main, "main not defined"
2796 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002797 assert main.CLIs, "main.CLIs not defined"
2798 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002799
2800 # printing colors to terminal
2801 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2802 'blue': '\033[94m', 'green': '\033[92m',
2803 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2804 main.case( "Test Cleanup" )
2805 main.step( "Killing tcpdumps" )
2806 main.Mininet2.stopTcpdump()
2807
2808 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002809 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002810 main.step( "Copying MN pcap and ONOS log files to test station" )
2811 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2812 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002813 # NOTE: MN Pcap file is being saved to logdir.
2814 # We scp this file as MN and TestON aren't necessarily the same vm
2815
2816 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002817 # TODO: Load these from params
2818 # NOTE: must end in /
2819 logFolder = "/opt/onos/log/"
2820 logFiles = [ "karaf.log", "karaf.log.1" ]
2821 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002822 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002823 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002824 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002825 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2826 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002827 # std*.log's
2828 # NOTE: must end in /
2829 logFolder = "/opt/onos/var/"
2830 logFiles = [ "stderr.log", "stdout.log" ]
2831 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002832 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002833 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002834 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002835 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2836 logFolder + f, dstName )
2837 else:
2838 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002839
2840 main.step( "Stopping Mininet" )
2841 mnResult = main.Mininet1.stopNet()
2842 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2843 onpass="Mininet stopped",
2844 onfail="MN cleanup NOT successful" )
2845
2846 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002847 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002848 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2849 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002850
2851 try:
2852            timerLog = open( main.logdir + "/Timers.csv", 'w' )
2853 # Overwrite with empty line and close
2854 labels = "Gossip Intents, Restart"
2855 data = str( gossipTime ) + ", " + str( main.restartTime )
2856 timerLog.write( labels + "\n" + data )
2857 timerLog.close()
2858 except NameError, e:
2859            main.log.exception( e )
2860
2861 def CASE14( self, main ):
2862 """
2863 start election app on all onos nodes
2864 """
Jon Halle1a3b752015-07-22 13:02:46 -07002865 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002866 assert main, "main not defined"
2867 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002868 assert main.CLIs, "main.CLIs not defined"
2869 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002870
2871        main.case( "Start Leadership Election app" )
2872 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002873 onosCli = main.CLIs[ main.activeNodes[0] ]
2874 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002875 utilities.assert_equals(
2876 expect=main.TRUE,
2877 actual=appResult,
2878 onpass="Election app installed",
2879 onfail="Something went wrong with installing Leadership election" )
2880
2881 main.step( "Run for election on each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002882 for i in main.activeNodes:
2883 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002884        time.sleep( 5 )
2885 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
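        # consistentLeaderboards() returns a tuple of ( whether all boards match,
        # the list of leaderboards ), so leaders[ 0 ][ 0 ] below is the election
        # leader as reported by the first active node.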
2886 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002887 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002888 expect=True,
2889 actual=sameResult,
2890 onpass="All nodes see the same leaderboards",
2891 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002892
Jon Hall25463a82016-04-13 14:03:52 -07002893 if sameResult:
2894 leader = leaders[ 0 ][ 0 ]
2895 if main.nodes[main.activeNodes[0]].ip_address in leader:
2896 correctLeader = True
2897 else:
2898 correctLeader = False
2899 main.step( "First node was elected leader" )
2900 utilities.assert_equals(
2901 expect=True,
2902 actual=correctLeader,
2903 onpass="Correct leader was elected",
2904 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002905
2906 def CASE15( self, main ):
2907 """
2908 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002909 15.1 Run election on each node
2910 15.2 Check that each node has the same leaders and candidates
2911 15.3 Find current leader and withdraw
2912 15.4 Check that a new node was elected leader
2913 15.5 Check that that new leader was the candidate of old leader
2914 15.6 Run for election on old leader
2915 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2916 15.8 Make sure that the old leader was added to the candidate list
2917
2918 old and new variable prefixes refer to data from before vs after
2919 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002920 """
2921 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002922 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002923 assert main, "main not defined"
2924 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002925 assert main.CLIs, "main.CLIs not defined"
2926 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002927
Jon Hall5cf14d52015-07-16 12:15:19 -07002928 description = "Check that Leadership Election is still functional"
2929 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002930        # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002931
Jon Halla440e872016-03-31 15:15:50 -07002932        oldLeaders = []  # list of lists of each node's candidates before the withdrawal
2933        newLeaders = []  # list of lists of each node's candidates after the withdrawal
acsmars71adceb2015-08-31 15:09:26 -07002934        oldLeader = ''  # the old leader from oldLeaders, None if not same
2935        newLeader = ''  # the new leader from newLeaders, None if not same
2936 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2937 expectNoLeader = False # True when there is only one leader
2938 if main.numCtrls == 1:
2939 expectNoLeader = True
2940
2941 main.step( "Run for election on each node" )
2942 electionResult = main.TRUE
2943
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002944 for i in main.activeNodes: # run test election on each node
2945 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002946 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002947 utilities.assert_equals(
2948 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002949 actual=electionResult,
2950 onpass="All nodes successfully ran for leadership",
2951 onfail="At least one node failed to run for leadership" )
2952
acsmars3a72bde2015-09-02 14:16:22 -07002953 if electionResult == main.FALSE:
2954 main.log.error(
2955 "Skipping Test Case because Election Test App isn't loaded" )
2956 main.skipCase()
2957
acsmars71adceb2015-08-31 15:09:26 -07002958 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002959 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07002960 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07002961 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07002962 if sameResult:
2963 oldLeader = oldLeaders[ 0 ][ 0 ]
2964 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002965 else:
Jon Halla440e872016-03-31 15:15:50 -07002966 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002967 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002968 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002969 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002970 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002971 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002972
2973 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002974 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002975 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002976 if oldLeader is None:
2977 main.log.error( "Leadership isn't consistent." )
2978 withdrawResult = main.FALSE
2979 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002980 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002981 if oldLeader == main.nodes[ i ].ip_address:
2982 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002983 break
2984 else: # FOR/ELSE statement
2985 main.log.error( "Leader election, could not find current leader" )
2986 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002987 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002988 utilities.assert_equals(
2989 expect=main.TRUE,
2990 actual=withdrawResult,
2991 onpass="Node was withdrawn from election",
2992 onfail="Node was not withdrawn from election" )
2993
acsmars71adceb2015-08-31 15:09:26 -07002994 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07002995 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07002996 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07002997 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07002998 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07002999 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07003000 if newLeaders[ 0 ][ 0 ] == 'none':
3001 main.log.error( "No leader was elected on at least 1 node" )
3002 if not expectNoLeader:
3003 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07003004 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07003005
3006 # Check that the new leader is not the older leader, which was withdrawn
3007 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003008 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003009 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07003010 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003011 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003012 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003013 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003014 onpass="Leadership election passed",
3015 onfail="Something went wrong with Leadership election" )
3016
Jon Halla440e872016-03-31 15:15:50 -07003017 main.step( "Check that that new leader was the candidate of old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003018        # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07003019 correctCandidateResult = main.TRUE
3020 if expectNoLeader:
3021 if newLeader == 'none':
3022 main.log.info( "No leader expected. None found. Pass" )
3023 correctCandidateResult = main.TRUE
3024 else:
3025 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3026 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003027 elif len( oldLeaders[0] ) >= 3:
3028 if newLeader == oldLeaders[ 0 ][ 2 ]:
3029 # correct leader was elected
3030 correctCandidateResult = main.TRUE
3031 else:
3032 correctCandidateResult = main.FALSE
3033 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3034 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003035 else:
3036 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003037 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003038 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003039 utilities.assert_equals(
3040 expect=main.TRUE,
3041 actual=correctCandidateResult,
3042 onpass="Correct Candidate Elected",
3043 onfail="Incorrect Candidate Elected" )
3044
Jon Hall5cf14d52015-07-16 12:15:19 -07003045 main.step( "Run for election on old leader( just so everyone " +
3046 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003047 if oldLeaderCLI is not None:
3048 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003049 else:
acsmars71adceb2015-08-31 15:09:26 -07003050 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003051 runResult = main.FALSE
3052 utilities.assert_equals(
3053 expect=main.TRUE,
3054 actual=runResult,
3055 onpass="App re-ran for election",
3056 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003057
acsmars71adceb2015-08-31 15:09:26 -07003058 main.step(
3059 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003060 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003061 # Get new leaders and candidates
3062 reRunLeaders = []
3063        time.sleep( 5 )  # TODO: parameterize this sleep
Jon Hall41d39f12016-04-11 22:54:35 -07003064 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003065
3066 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07003067 if not reRunLeaders[0]:
3068 positionResult = main.FALSE
3069 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07003070            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3071 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003072 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003073 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003074 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003075 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003076 onpass="Old leader successfully re-ran for election",
3077 onfail="Something went wrong with Leadership election after " +
3078 "the old leader re-ran for election" )
3079
3080 def CASE16( self, main ):
3081 """
3082 Install Distributed Primitives app
3083 """
3084 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003085 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003086 assert main, "main not defined"
3087 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003088 assert main.CLIs, "main.CLIs not defined"
3089 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003090
3091 # Variables for the distributed primitives tests
3092 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003093 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003094 global onosSet
3095 global onosSetName
3096 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003097 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003098 onosSet = set([])
3099 onosSetName = "TestON-set"
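        # pCounterValue and onosSet mirror the expected state of the distributed
        # counter and set locally, so that CASE17 can compare what ONOS reports
        # against what the test believes it has written.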
3100
3101 description = "Install Primitives app"
3102 main.case( description )
3103 main.step( "Install Primitives app" )
3104 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003105 node = main.activeNodes[0]
3106 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003107 utilities.assert_equals( expect=main.TRUE,
3108 actual=appResults,
3109 onpass="Primitives app activated",
3110 onfail="Primitives app not activated" )
3111 time.sleep( 5 ) # To allow all nodes to activate
3112
3113 def CASE17( self, main ):
3114 """
3115 Check for basic functionality with distributed primitives
3116 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003117 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003118 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003119 assert main, "main not defined"
3120 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003121 assert main.CLIs, "main.CLIs not defined"
3122 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003123 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003124 assert onosSetName, "onosSetName not defined"
3125 # NOTE: assert fails if value is 0/None/Empty/False
3126 try:
3127 pCounterValue
3128 except NameError:
3129 main.log.error( "pCounterValue not defined, setting to 0" )
3130 pCounterValue = 0
3131 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003132 onosSet
3133 except NameError:
3134 main.log.error( "onosSet not defined, setting to empty Set" )
3135 onosSet = set([])
3136 # Variables for the distributed primitives tests. These are local only
3137 addValue = "a"
3138 addAllValue = "a b c d e f"
3139 retainValue = "c d e f"
3140
3141 description = "Check for basic functionality with distributed " +\
3142 "primitives"
3143 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003144 main.caseExplanation = "Test the methods of the distributed " +\
3145                               "primitives (counters and sets) through the CLI"
Jon Hall5cf14d52015-07-16 12:15:19 -07003146 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003147 # Partitioned counters
3148 main.step( "Increment then get a default counter on each node" )
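        # Each step below uses the same fan-out pattern: spawn one CLI thread per
        # active node, join them all, collect the results, and check that every
        # value predicted by the local bookkeeping shows up in the responses.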
Jon Hall5cf14d52015-07-16 12:15:19 -07003149 pCounters = []
3150 threads = []
3151 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003152 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003153 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3154 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003155 args=[ pCounterName ] )
3156 pCounterValue += 1
3157 addedPValues.append( pCounterValue )
3158 threads.append( t )
3159 t.start()
3160
3161 for t in threads:
3162 t.join()
3163 pCounters.append( t.result )
3164 # Check that counter incremented numController times
3165 pCounterResults = True
3166 for i in addedPValues:
3167 tmpResult = i in pCounters
3168 pCounterResults = pCounterResults and tmpResult
3169 if not tmpResult:
3170 main.log.error( str( i ) + " is not in partitioned "
3171 "counter incremented results" )
3172 utilities.assert_equals( expect=True,
3173 actual=pCounterResults,
3174 onpass="Default counter incremented",
3175 onfail="Error incrementing default" +
3176 " counter" )
3177
Jon Halle1a3b752015-07-22 13:02:46 -07003178 main.step( "Get then Increment a default counter on each node" )
3179 pCounters = []
3180 threads = []
3181 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003182 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003183 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3184 name="counterGetAndAdd-" + str( i ),
3185 args=[ pCounterName ] )
3186 addedPValues.append( pCounterValue )
3187 pCounterValue += 1
3188 threads.append( t )
3189 t.start()
3190
3191 for t in threads:
3192 t.join()
3193 pCounters.append( t.result )
3194 # Check that counter incremented numController times
3195 pCounterResults = True
3196 for i in addedPValues:
3197 tmpResult = i in pCounters
3198 pCounterResults = pCounterResults and tmpResult
3199 if not tmpResult:
3200 main.log.error( str( i ) + " is not in partitioned "
3201 "counter incremented results" )
3202 utilities.assert_equals( expect=True,
3203 actual=pCounterResults,
3204 onpass="Default counter incremented",
3205 onfail="Error incrementing default" +
3206 " counter" )
3207
3208 main.step( "Counters we added have the correct values" )
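        # counterCheck() verifies that the value of pCounterName reported by ONOS
        # matches the locally tracked pCounterValue.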
Jon Hall41d39f12016-04-11 22:54:35 -07003209 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003210 utilities.assert_equals( expect=main.TRUE,
3211 actual=incrementCheck,
3212 onpass="Added counters are correct",
3213 onfail="Added counters are incorrect" )
3214
3215 main.step( "Add -8 to then get a default counter on each node" )
3216 pCounters = []
3217 threads = []
3218 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003219 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003220 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3221 name="counterIncrement-" + str( i ),
3222 args=[ pCounterName ],
3223 kwargs={ "delta": -8 } )
3224 pCounterValue += -8
3225 addedPValues.append( pCounterValue )
3226 threads.append( t )
3227 t.start()
3228
3229 for t in threads:
3230 t.join()
3231 pCounters.append( t.result )
3232 # Check that counter incremented numController times
3233 pCounterResults = True
3234 for i in addedPValues:
3235 tmpResult = i in pCounters
3236 pCounterResults = pCounterResults and tmpResult
3237 if not tmpResult:
3238 main.log.error( str( i ) + " is not in partitioned "
3239 "counter incremented results" )
3240 utilities.assert_equals( expect=True,
3241 actual=pCounterResults,
3242 onpass="Default counter incremented",
3243 onfail="Error incrementing default" +
3244 " counter" )
3245
3246 main.step( "Add 5 to then get a default counter on each node" )
3247 pCounters = []
3248 threads = []
3249 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003250 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003251 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3252 name="counterIncrement-" + str( i ),
3253 args=[ pCounterName ],
3254 kwargs={ "delta": 5 } )
3255 pCounterValue += 5
3256 addedPValues.append( pCounterValue )
3257 threads.append( t )
3258 t.start()
3259
3260 for t in threads:
3261 t.join()
3262 pCounters.append( t.result )
3263 # Check that counter incremented numController times
3264 pCounterResults = True
3265 for i in addedPValues:
3266 tmpResult = i in pCounters
3267 pCounterResults = pCounterResults and tmpResult
3268 if not tmpResult:
3269 main.log.error( str( i ) + " is not in partitioned "
3270 "counter incremented results" )
3271 utilities.assert_equals( expect=True,
3272 actual=pCounterResults,
3273 onpass="Default counter incremented",
3274 onfail="Error incrementing default" +
3275 " counter" )
3276
3277 main.step( "Get then add 5 to a default counter on each node" )
3278 pCounters = []
3279 threads = []
3280 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003281 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003282 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3283 name="counterIncrement-" + str( i ),
3284 args=[ pCounterName ],
3285 kwargs={ "delta": 5 } )
3286 addedPValues.append( pCounterValue )
3287 pCounterValue += 5
3288 threads.append( t )
3289 t.start()
3290
3291 for t in threads:
3292 t.join()
3293 pCounters.append( t.result )
3294 # Check that counter incremented numController times
3295 pCounterResults = True
3296 for i in addedPValues:
3297 tmpResult = i in pCounters
3298 pCounterResults = pCounterResults and tmpResult
3299 if not tmpResult:
3300 main.log.error( str( i ) + " is not in partitioned "
3301 "counter incremented results" )
3302 utilities.assert_equals( expect=True,
3303 actual=pCounterResults,
3304 onpass="Default counter incremented",
3305 onfail="Error incrementing default" +
3306 " counter" )
3307
3308 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003309 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003310 utilities.assert_equals( expect=main.TRUE,
3311 actual=incrementCheck,
3312 onpass="Added counters are correct",
3313 onfail="Added counters are incorrect" )
3314
Jon Hall5cf14d52015-07-16 12:15:19 -07003315 # DISTRIBUTED SETS
3316 main.step( "Distributed Set get" )
3317 size = len( onosSet )
3318 getResponses = []
3319 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003320 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003321 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003322 name="setTestGet-" + str( i ),
3323 args=[ onosSetName ] )
3324 threads.append( t )
3325 t.start()
3326 for t in threads:
3327 t.join()
3328 getResponses.append( t.result )
3329
3330 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003331 for i in range( len( main.activeNodes ) ):
3332 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003333 if isinstance( getResponses[ i ], list):
3334 current = set( getResponses[ i ] )
3335 if len( current ) == len( getResponses[ i ] ):
3336 # no repeats
3337 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003338 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003339 " has incorrect view" +
3340 " of set " + onosSetName + ":\n" +
3341 str( getResponses[ i ] ) )
3342 main.log.debug( "Expected: " + str( onosSet ) )
3343 main.log.debug( "Actual: " + str( current ) )
3344 getResults = main.FALSE
3345 else:
3346 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003347 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003348 " has repeat elements in" +
3349 " set " + onosSetName + ":\n" +
3350 str( getResponses[ i ] ) )
3351 getResults = main.FALSE
3352 elif getResponses[ i ] == main.ERROR:
3353 getResults = main.FALSE
3354 utilities.assert_equals( expect=main.TRUE,
3355 actual=getResults,
3356 onpass="Set elements are correct",
3357 onfail="Set elements are incorrect" )
3358
3359 main.step( "Distributed Set size" )
3360 sizeResponses = []
3361 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003362 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003363 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003364 name="setTestSize-" + str( i ),
3365 args=[ onosSetName ] )
3366 threads.append( t )
3367 t.start()
3368 for t in threads:
3369 t.join()
3370 sizeResponses.append( t.result )
3371
3372 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003373 for i in range( len( main.activeNodes ) ):
3374 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003375 if size != sizeResponses[ i ]:
3376 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003377 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003378 " expected a size of " + str( size ) +
3379 " for set " + onosSetName +
3380 " but got " + str( sizeResponses[ i ] ) )
3381 utilities.assert_equals( expect=main.TRUE,
3382 actual=sizeResults,
3383 onpass="Set sizes are correct",
3384 onfail="Set sizes are incorrect" )
3385
3386 main.step( "Distributed Set add()" )
3387 onosSet.add( addValue )
3388 addResponses = []
3389 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003390 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003391 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003392 name="setTestAdd-" + str( i ),
3393 args=[ onosSetName, addValue ] )
3394 threads.append( t )
3395 t.start()
3396 for t in threads:
3397 t.join()
3398 addResponses.append( t.result )
3399
3400 # main.TRUE = successfully changed the set
3401 # main.FALSE = action resulted in no change in set
3402 # main.ERROR - Some error in executing the function
3403 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003404 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003405 if addResponses[ i ] == main.TRUE:
3406 # All is well
3407 pass
3408 elif addResponses[ i ] == main.FALSE:
3409 # Already in set, probably fine
3410 pass
3411 elif addResponses[ i ] == main.ERROR:
3412 # Error in execution
3413 addResults = main.FALSE
3414 else:
3415 # unexpected result
3416 addResults = main.FALSE
3417 if addResults != main.TRUE:
3418 main.log.error( "Error executing set add" )
3419
3420 # Check if set is still correct
3421 size = len( onosSet )
3422 getResponses = []
3423 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003424 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003425 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003426 name="setTestGet-" + str( i ),
3427 args=[ onosSetName ] )
3428 threads.append( t )
3429 t.start()
3430 for t in threads:
3431 t.join()
3432 getResponses.append( t.result )
3433 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003434 for i in range( len( main.activeNodes ) ):
3435 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003436 if isinstance( getResponses[ i ], list):
3437 current = set( getResponses[ i ] )
3438 if len( current ) == len( getResponses[ i ] ):
3439 # no repeats
3440 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003441 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003442 " of set " + onosSetName + ":\n" +
3443 str( getResponses[ i ] ) )
3444 main.log.debug( "Expected: " + str( onosSet ) )
3445 main.log.debug( "Actual: " + str( current ) )
3446 getResults = main.FALSE
3447 else:
3448 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003449 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003450 " set " + onosSetName + ":\n" +
3451 str( getResponses[ i ] ) )
3452 getResults = main.FALSE
3453 elif getResponses[ i ] == main.ERROR:
3454 getResults = main.FALSE
3455 sizeResponses = []
3456 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003457 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003458 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003459 name="setTestSize-" + str( i ),
3460 args=[ onosSetName ] )
3461 threads.append( t )
3462 t.start()
3463 for t in threads:
3464 t.join()
3465 sizeResponses.append( t.result )
3466 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003467 for i in range( len( main.activeNodes ) ):
3468 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003469 if size != sizeResponses[ i ]:
3470 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003471 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003472 " expected a size of " + str( size ) +
3473 " for set " + onosSetName +
3474 " but got " + str( sizeResponses[ i ] ) )
3475 addResults = addResults and getResults and sizeResults
3476 utilities.assert_equals( expect=main.TRUE,
3477 actual=addResults,
3478 onpass="Set add correct",
3479 onfail="Set add was incorrect" )
3480
3481 main.step( "Distributed Set addAll()" )
3482 onosSet.update( addAllValue.split() )
3483 addResponses = []
3484 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003485 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003486 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003487 name="setTestAddAll-" + str( i ),
3488 args=[ onosSetName, addAllValue ] )
3489 threads.append( t )
3490 t.start()
3491 for t in threads:
3492 t.join()
3493 addResponses.append( t.result )
3494
3495 # main.TRUE = successfully changed the set
3496 # main.FALSE = action resulted in no change in set
3497 # main.ERROR - Some error in executing the function
3498 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003499 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003500 if addResponses[ i ] == main.TRUE:
3501 # All is well
3502 pass
3503 elif addResponses[ i ] == main.FALSE:
3504 # Already in set, probably fine
3505 pass
3506 elif addResponses[ i ] == main.ERROR:
3507 # Error in execution
3508 addAllResults = main.FALSE
3509 else:
3510 # unexpected result
3511 addAllResults = main.FALSE
3512 if addAllResults != main.TRUE:
3513 main.log.error( "Error executing set addAll" )
3514
3515 # Check if set is still correct
3516 size = len( onosSet )
3517 getResponses = []
3518 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003519 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003520 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003521 name="setTestGet-" + str( i ),
3522 args=[ onosSetName ] )
3523 threads.append( t )
3524 t.start()
3525 for t in threads:
3526 t.join()
3527 getResponses.append( t.result )
3528 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003529 for i in range( len( main.activeNodes ) ):
3530 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003531 if isinstance( getResponses[ i ], list):
3532 current = set( getResponses[ i ] )
3533 if len( current ) == len( getResponses[ i ] ):
3534 # no repeats
3535 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003536 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003537 " has incorrect view" +
3538 " of set " + onosSetName + ":\n" +
3539 str( getResponses[ i ] ) )
3540 main.log.debug( "Expected: " + str( onosSet ) )
3541 main.log.debug( "Actual: " + str( current ) )
3542 getResults = main.FALSE
3543 else:
3544 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003545 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003546 " has repeat elements in" +
3547 " set " + onosSetName + ":\n" +
3548 str( getResponses[ i ] ) )
3549 getResults = main.FALSE
3550 elif getResponses[ i ] == main.ERROR:
3551 getResults = main.FALSE
3552 sizeResponses = []
3553 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003554 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003555 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003556 name="setTestSize-" + str( i ),
3557 args=[ onosSetName ] )
3558 threads.append( t )
3559 t.start()
3560 for t in threads:
3561 t.join()
3562 sizeResponses.append( t.result )
3563 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003564 for i in range( len( main.activeNodes ) ):
3565 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003566 if size != sizeResponses[ i ]:
3567 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003568 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003569 " expected a size of " + str( size ) +
3570 " for set " + onosSetName +
3571 " but got " + str( sizeResponses[ i ] ) )
3572 addAllResults = addAllResults and getResults and sizeResults
3573 utilities.assert_equals( expect=main.TRUE,
3574 actual=addAllResults,
3575 onpass="Set addAll correct",
3576 onfail="Set addAll was incorrect" )
3577
3578 main.step( "Distributed Set contains()" )
3579 containsResponses = []
3580 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003581 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003582 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003583 name="setContains-" + str( i ),
3584 args=[ onosSetName ],
3585 kwargs={ "values": addValue } )
3586 threads.append( t )
3587 t.start()
3588 for t in threads:
3589 t.join()
3590 # NOTE: This is the tuple
3591 containsResponses.append( t.result )
3592
3593 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003594 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003595 if containsResponses[ i ] == main.ERROR:
3596 containsResults = main.FALSE
3597 else:
3598 containsResults = containsResults and\
3599 containsResponses[ i ][ 1 ]
3600 utilities.assert_equals( expect=main.TRUE,
3601 actual=containsResults,
3602 onpass="Set contains is functional",
3603 onfail="Set contains failed" )
3604
3605 main.step( "Distributed Set containsAll()" )
3606 containsAllResponses = []
3607 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003608 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003609 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003610 name="setContainsAll-" + str( i ),
3611 args=[ onosSetName ],
3612 kwargs={ "values": addAllValue } )
3613 threads.append( t )
3614 t.start()
3615 for t in threads:
3616 t.join()
3617 # NOTE: This is the tuple
3618 containsAllResponses.append( t.result )
3619
3620 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003621 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003622            if containsAllResponses[ i ] == main.ERROR:
3623                containsAllResults = main.FALSE
3624            else:
3625                containsAllResults = containsAllResults and\
3626                                     containsAllResponses[ i ][ 1 ]
3627 utilities.assert_equals( expect=main.TRUE,
3628 actual=containsAllResults,
3629 onpass="Set containsAll is functional",
3630 onfail="Set containsAll failed" )
3631
3632 main.step( "Distributed Set remove()" )
3633 onosSet.remove( addValue )
3634 removeResponses = []
3635 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003636 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003637 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003638 name="setTestRemove-" + str( i ),
3639 args=[ onosSetName, addValue ] )
3640 threads.append( t )
3641 t.start()
3642 for t in threads:
3643 t.join()
3644 removeResponses.append( t.result )
3645
3646 # main.TRUE = successfully changed the set
3647 # main.FALSE = action resulted in no change in set
3648 # main.ERROR - Some error in executing the function
3649 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003650 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003651 if removeResponses[ i ] == main.TRUE:
3652 # All is well
3653 pass
3654 elif removeResponses[ i ] == main.FALSE:
3655 # not in set, probably fine
3656 pass
3657 elif removeResponses[ i ] == main.ERROR:
3658 # Error in execution
3659 removeResults = main.FALSE
3660 else:
3661 # unexpected result
3662 removeResults = main.FALSE
3663 if removeResults != main.TRUE:
3664 main.log.error( "Error executing set remove" )
3665
3666 # Check if set is still correct
3667 size = len( onosSet )
3668 getResponses = []
3669 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003670 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003671 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003672 name="setTestGet-" + str( i ),
3673 args=[ onosSetName ] )
3674 threads.append( t )
3675 t.start()
3676 for t in threads:
3677 t.join()
3678 getResponses.append( t.result )
3679 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003680 for i in range( len( main.activeNodes ) ):
3681 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003682 if isinstance( getResponses[ i ], list):
3683 current = set( getResponses[ i ] )
3684 if len( current ) == len( getResponses[ i ] ):
3685 # no repeats
3686 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003687 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003688 " has incorrect view" +
3689 " of set " + onosSetName + ":\n" +
3690 str( getResponses[ i ] ) )
3691 main.log.debug( "Expected: " + str( onosSet ) )
3692 main.log.debug( "Actual: " + str( current ) )
3693 getResults = main.FALSE
3694 else:
3695 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003696 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003697 " has repeat elements in" +
3698 " set " + onosSetName + ":\n" +
3699 str( getResponses[ i ] ) )
3700 getResults = main.FALSE
3701 elif getResponses[ i ] == main.ERROR:
3702 getResults = main.FALSE
3703 sizeResponses = []
3704 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003705 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003706 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003707 name="setTestSize-" + str( i ),
3708 args=[ onosSetName ] )
3709 threads.append( t )
3710 t.start()
3711 for t in threads:
3712 t.join()
3713 sizeResponses.append( t.result )
3714 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003715 for i in range( len( main.activeNodes ) ):
3716 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003717 if size != sizeResponses[ i ]:
3718 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003719 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003720 " expected a size of " + str( size ) +
3721 " for set " + onosSetName +
3722 " but got " + str( sizeResponses[ i ] ) )
3723 removeResults = removeResults and getResults and sizeResults
3724 utilities.assert_equals( expect=main.TRUE,
3725 actual=removeResults,
3726 onpass="Set remove correct",
3727 onfail="Set remove was incorrect" )
3728
3729 main.step( "Distributed Set removeAll()" )
3730 onosSet.difference_update( addAllValue.split() )
3731 removeAllResponses = []
3732 threads = []
3733 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003734 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003735 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003736 name="setTestRemoveAll-" + str( i ),
3737 args=[ onosSetName, addAllValue ] )
3738 threads.append( t )
3739 t.start()
3740 for t in threads:
3741 t.join()
3742 removeAllResponses.append( t.result )
3743 except Exception, e:
3744 main.log.exception(e)
3745
3746 # main.TRUE = successfully changed the set
3747 # main.FALSE = action resulted in no change in set
3748 # main.ERROR - Some error in executing the function
3749 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003750 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003751 if removeAllResponses[ i ] == main.TRUE:
3752 # All is well
3753 pass
3754 elif removeAllResponses[ i ] == main.FALSE:
3755 # not in set, probably fine
3756 pass
3757 elif removeAllResponses[ i ] == main.ERROR:
3758 # Error in execution
3759 removeAllResults = main.FALSE
3760 else:
3761 # unexpected result
3762 removeAllResults = main.FALSE
3763 if removeAllResults != main.TRUE:
3764 main.log.error( "Error executing set removeAll" )
3765
3766 # Check if set is still correct
3767 size = len( onosSet )
3768 getResponses = []
3769 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003770 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003771 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003772 name="setTestGet-" + str( i ),
3773 args=[ onosSetName ] )
3774 threads.append( t )
3775 t.start()
3776 for t in threads:
3777 t.join()
3778 getResponses.append( t.result )
3779 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003780 for i in range( len( main.activeNodes ) ):
3781 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003782 if isinstance( getResponses[ i ], list):
3783 current = set( getResponses[ i ] )
3784 if len( current ) == len( getResponses[ i ] ):
3785 # no repeats
3786 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003787 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003788 " has incorrect view" +
3789 " of set " + onosSetName + ":\n" +
3790 str( getResponses[ i ] ) )
3791 main.log.debug( "Expected: " + str( onosSet ) )
3792 main.log.debug( "Actual: " + str( current ) )
3793 getResults = main.FALSE
3794 else:
3795 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003796 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003797 " has repeat elements in" +
3798 " set " + onosSetName + ":\n" +
3799 str( getResponses[ i ] ) )
3800 getResults = main.FALSE
3801 elif getResponses[ i ] == main.ERROR:
3802 getResults = main.FALSE
3803 sizeResponses = []
3804 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003805 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003806 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003807 name="setTestSize-" + str( i ),
3808 args=[ onosSetName ] )
3809 threads.append( t )
3810 t.start()
3811 for t in threads:
3812 t.join()
3813 sizeResponses.append( t.result )
3814 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003815 for i in range( len( main.activeNodes ) ):
3816 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003817 if size != sizeResponses[ i ]:
3818 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003819 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003820 " expected a size of " + str( size ) +
3821 " for set " + onosSetName +
3822 " but got " + str( sizeResponses[ i ] ) )
3823 removeAllResults = removeAllResults and getResults and sizeResults
3824 utilities.assert_equals( expect=main.TRUE,
3825 actual=removeAllResults,
3826 onpass="Set removeAll correct",
3827 onfail="Set removeAll was incorrect" )
3828
3829 main.step( "Distributed Set addAll()" )
3830 onosSet.update( addAllValue.split() )
3831 addResponses = []
3832 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003833 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003834 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003835 name="setTestAddAll-" + str( i ),
3836 args=[ onosSetName, addAllValue ] )
3837 threads.append( t )
3838 t.start()
3839 for t in threads:
3840 t.join()
3841 addResponses.append( t.result )
3842
3843 # main.TRUE = successfully changed the set
3844 # main.FALSE = action resulted in no change in set
3845 # main.ERROR - Some error in executing the function
3846 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003847 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003848 if addResponses[ i ] == main.TRUE:
3849 # All is well
3850 pass
3851 elif addResponses[ i ] == main.FALSE:
3852 # Already in set, probably fine
3853 pass
3854 elif addResponses[ i ] == main.ERROR:
3855 # Error in execution
3856 addAllResults = main.FALSE
3857 else:
3858 # unexpected result
3859 addAllResults = main.FALSE
3860 if addAllResults != main.TRUE:
3861 main.log.error( "Error executing set addAll" )
3862
3863 # Check if set is still correct
3864 size = len( onosSet )
3865 getResponses = []
3866 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003867 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003868 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003869 name="setTestGet-" + str( i ),
3870 args=[ onosSetName ] )
3871 threads.append( t )
3872 t.start()
3873 for t in threads:
3874 t.join()
3875 getResponses.append( t.result )
3876 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003877 for i in range( len( main.activeNodes ) ):
3878 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003879 if isinstance( getResponses[ i ], list):
3880 current = set( getResponses[ i ] )
3881 if len( current ) == len( getResponses[ i ] ):
3882 # no repeats
3883 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003884 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003885 " has incorrect view" +
3886 " of set " + onosSetName + ":\n" +
3887 str( getResponses[ i ] ) )
3888 main.log.debug( "Expected: " + str( onosSet ) )
3889 main.log.debug( "Actual: " + str( current ) )
3890 getResults = main.FALSE
3891 else:
3892 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003893 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003894 " has repeat elements in" +
3895 " set " + onosSetName + ":\n" +
3896 str( getResponses[ i ] ) )
3897 getResults = main.FALSE
3898 elif getResponses[ i ] == main.ERROR:
3899 getResults = main.FALSE
3900 sizeResponses = []
3901 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003902 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003903 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003904 name="setTestSize-" + str( i ),
3905 args=[ onosSetName ] )
3906 threads.append( t )
3907 t.start()
3908 for t in threads:
3909 t.join()
3910 sizeResponses.append( t.result )
3911 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003912 for i in range( len( main.activeNodes ) ):
3913 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003914 if size != sizeResponses[ i ]:
3915 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003916 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003917 " expected a size of " + str( size ) +
3918 " for set " + onosSetName +
3919 " but got " + str( sizeResponses[ i ] ) )
3920 addAllResults = addAllResults and getResults and sizeResults
3921 utilities.assert_equals( expect=main.TRUE,
3922 actual=addAllResults,
3923 onpass="Set addAll correct",
3924 onfail="Set addAll was incorrect" )
3925
3926 main.step( "Distributed Set clear()" )
3927 onosSet.clear()
3928 clearResponses = []
3929 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003930 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003931 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003932 name="setTestClear-" + str( i ),
3933 args=[ onosSetName, " "], # Values doesn't matter
3934 kwargs={ "clear": True } )
3935 threads.append( t )
3936 t.start()
3937 for t in threads:
3938 t.join()
3939 clearResponses.append( t.result )
3940
3941 # main.TRUE = successfully changed the set
3942 # main.FALSE = action resulted in no change in set
3943 # main.ERROR - Some error in executing the function
3944 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003945 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003946 if clearResponses[ i ] == main.TRUE:
3947 # All is well
3948 pass
3949 elif clearResponses[ i ] == main.FALSE:
3950 # Nothing set, probably fine
3951 pass
3952 elif clearResponses[ i ] == main.ERROR:
3953 # Error in execution
3954 clearResults = main.FALSE
3955 else:
3956 # unexpected result
3957 clearResults = main.FALSE
3958 if clearResults != main.TRUE:
3959 main.log.error( "Error executing set clear" )
3960
3961 # Check if set is still correct
3962 size = len( onosSet )
3963 getResponses = []
3964 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003965 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003966 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003967 name="setTestGet-" + str( i ),
3968 args=[ onosSetName ] )
3969 threads.append( t )
3970 t.start()
3971 for t in threads:
3972 t.join()
3973 getResponses.append( t.result )
3974 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003975 for i in range( len( main.activeNodes ) ):
3976 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003977 if isinstance( getResponses[ i ], list):
3978 current = set( getResponses[ i ] )
3979 if len( current ) == len( getResponses[ i ] ):
3980 # no repeats
3981 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003982 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003983 " has incorrect view" +
3984 " of set " + onosSetName + ":\n" +
3985 str( getResponses[ i ] ) )
3986 main.log.debug( "Expected: " + str( onosSet ) )
3987 main.log.debug( "Actual: " + str( current ) )
3988 getResults = main.FALSE
3989 else:
3990 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003991 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003992 " has repeat elements in" +
3993 " set " + onosSetName + ":\n" +
3994 str( getResponses[ i ] ) )
3995 getResults = main.FALSE
3996 elif getResponses[ i ] == main.ERROR:
3997 getResults = main.FALSE
3998 sizeResponses = []
3999 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004000 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004001 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004002 name="setTestSize-" + str( i ),
4003 args=[ onosSetName ] )
4004 threads.append( t )
4005 t.start()
4006 for t in threads:
4007 t.join()
4008 sizeResponses.append( t.result )
4009 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004010 for i in range( len( main.activeNodes ) ):
4011 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004012 if size != sizeResponses[ i ]:
4013 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004014 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004015 " expected a size of " + str( size ) +
4016 " for set " + onosSetName +
4017 " but got " + str( sizeResponses[ i ] ) )
4018 clearResults = clearResults and getResults and sizeResults
4019 utilities.assert_equals( expect=main.TRUE,
4020 actual=clearResults,
4021 onpass="Set clear correct",
4022 onfail="Set clear was incorrect" )
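
        # The get() + size() verification just above is repeated after every
        # set mutation in this case. A sketch of the same checks as a single
        # reusable helper (hypothetical, not used by the test, and run
        # sequentially rather than threaded for brevity): it flags a node whose
        # returned list has duplicates, differs from the local model, or
        # reports the wrong size.
        def checkSetOnAllNodes( expectedSet, setName ):
            result = main.TRUE
            for i in main.activeNodes:
                node = str( i + 1 )
                getResponse = main.CLIs[ i ].setTestGet( setName )
                sizeResponse = main.CLIs[ i ].setTestSize( setName )
                if not isinstance( getResponse, list ):
                    result = main.FALSE
                elif len( set( getResponse ) ) != len( getResponse ):
                    main.log.error( "ONOS" + node + " has repeat elements in set " + setName )
                    result = main.FALSE
                elif set( getResponse ) != expectedSet:
                    main.log.error( "ONOS" + node + " has incorrect view of set " + setName )
                    result = main.FALSE
                if sizeResponse != len( expectedSet ):
                    result = main.FALSE
            return result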
4023
4024 main.step( "Distributed Set addAll()" )
4025 onosSet.update( addAllValue.split() )
4026 addResponses = []
4027 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004028 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004029 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004030 name="setTestAddAll-" + str( i ),
4031 args=[ onosSetName, addAllValue ] )
4032 threads.append( t )
4033 t.start()
4034 for t in threads:
4035 t.join()
4036 addResponses.append( t.result )
4037
4038 # main.TRUE = successfully changed the set
4039 # main.FALSE = action resulted in no change in set
4040 # main.ERROR - Some error in executing the function
4041 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004042 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004043 if addResponses[ i ] == main.TRUE:
4044 # All is well
4045 pass
4046 elif addResponses[ i ] == main.FALSE:
4047 # Already in set, probably fine
4048 pass
4049 elif addResponses[ i ] == main.ERROR:
4050 # Error in execution
4051 addAllResults = main.FALSE
4052 else:
4053 # unexpected result
4054 addAllResults = main.FALSE
4055 if addAllResults != main.TRUE:
4056 main.log.error( "Error executing set addAll" )
4057
4058 # Check if set is still correct
4059 size = len( onosSet )
4060 getResponses = []
4061 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004062 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004063 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004064 name="setTestGet-" + str( i ),
4065 args=[ onosSetName ] )
4066 threads.append( t )
4067 t.start()
4068 for t in threads:
4069 t.join()
4070 getResponses.append( t.result )
4071 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004072 for i in range( len( main.activeNodes ) ):
4073 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004074 if isinstance( getResponses[ i ], list):
4075 current = set( getResponses[ i ] )
4076 if len( current ) == len( getResponses[ i ] ):
4077 # no repeats
4078 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004079 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004080 " has incorrect view" +
4081 " of set " + onosSetName + ":\n" +
4082 str( getResponses[ i ] ) )
4083 main.log.debug( "Expected: " + str( onosSet ) )
4084 main.log.debug( "Actual: " + str( current ) )
4085 getResults = main.FALSE
4086 else:
4087 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004088 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004089 " has repeat elements in" +
4090 " set " + onosSetName + ":\n" +
4091 str( getResponses[ i ] ) )
4092 getResults = main.FALSE
4093 elif getResponses[ i ] == main.ERROR:
4094 getResults = main.FALSE
4095 sizeResponses = []
4096 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004097 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004098 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004099 name="setTestSize-" + str( i ),
4100 args=[ onosSetName ] )
4101 threads.append( t )
4102 t.start()
4103 for t in threads:
4104 t.join()
4105 sizeResponses.append( t.result )
4106 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004107 for i in range( len( main.activeNodes ) ):
4108 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004109 if size != sizeResponses[ i ]:
4110 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004111 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004112 " expected a size of " + str( size ) +
4113 " for set " + onosSetName +
4114 " but got " + str( sizeResponses[ i ] ) )
4115 addAllResults = addAllResults and getResults and sizeResults
4116 utilities.assert_equals( expect=main.TRUE,
4117 actual=addAllResults,
4118 onpass="Set addAll correct",
4119 onfail="Set addAll was incorrect" )
4120
4121 main.step( "Distributed Set retain()" )
4122 onosSet.intersection_update( retainValue.split() )
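        # Worked example of retain semantics with illustrative local values
        # only (does not touch the distributed set): retain keeps exactly the
        # intersection of the current contents and the retained values, which
        # is what intersection_update() does to the local model above.
        exampleModel = set( [ "a", "b", "c" ] )
        exampleModel.intersection_update( "b c d".split() )
        # exampleModel is now set( [ "b", "c" ] ); a correct distributed retain
        # leaves the ONOS set matching its model in the same way.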
4123 retainResponses = []
4124 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004125 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004126 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004127 name="setTestRetain-" + str( i ),
4128 args=[ onosSetName, retainValue ],
4129 kwargs={ "retain": True } )
4130 threads.append( t )
4131 t.start()
4132 for t in threads:
4133 t.join()
4134 retainResponses.append( t.result )
4135
4136 # main.TRUE = successfully changed the set
4137 # main.FALSE = action resulted in no change in set
4138 # main.ERROR - Some error in executing the function
4139 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004140 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004141 if retainResponses[ i ] == main.TRUE:
4142 # All is well
4143 pass
4144 elif retainResponses[ i ] == main.FALSE:
4145                # No change to the set (already matched the retained values), probably fine
4146 pass
4147 elif retainResponses[ i ] == main.ERROR:
4148 # Error in execution
4149 retainResults = main.FALSE
4150 else:
4151 # unexpected result
4152 retainResults = main.FALSE
4153 if retainResults != main.TRUE:
4154 main.log.error( "Error executing set retain" )
4155
4156 # Check if set is still correct
4157 size = len( onosSet )
4158 getResponses = []
4159 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004160 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004161 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004162 name="setTestGet-" + str( i ),
4163 args=[ onosSetName ] )
4164 threads.append( t )
4165 t.start()
4166 for t in threads:
4167 t.join()
4168 getResponses.append( t.result )
4169 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004170 for i in range( len( main.activeNodes ) ):
4171 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004172 if isinstance( getResponses[ i ], list):
4173 current = set( getResponses[ i ] )
4174 if len( current ) == len( getResponses[ i ] ):
4175 # no repeats
4176 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004177 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004178 " has incorrect view" +
4179 " of set " + onosSetName + ":\n" +
4180 str( getResponses[ i ] ) )
4181 main.log.debug( "Expected: " + str( onosSet ) )
4182 main.log.debug( "Actual: " + str( current ) )
4183 getResults = main.FALSE
4184 else:
4185 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004186 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004187 " has repeat elements in" +
4188 " set " + onosSetName + ":\n" +
4189 str( getResponses[ i ] ) )
4190 getResults = main.FALSE
4191 elif getResponses[ i ] == main.ERROR:
4192 getResults = main.FALSE
4193 sizeResponses = []
4194 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004195 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004196 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004197 name="setTestSize-" + str( i ),
4198 args=[ onosSetName ] )
4199 threads.append( t )
4200 t.start()
4201 for t in threads:
4202 t.join()
4203 sizeResponses.append( t.result )
4204 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004205 for i in range( len( main.activeNodes ) ):
4206 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004207 if size != sizeResponses[ i ]:
4208 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004209 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004210 str( size ) + " for set " + onosSetName +
4211 " but got " + str( sizeResponses[ i ] ) )
4212 retainResults = retainResults and getResults and sizeResults
4213 utilities.assert_equals( expect=main.TRUE,
4214 actual=retainResults,
4215 onpass="Set retain correct",
4216 onfail="Set retain was incorrect" )
4217
Jon Hall2a5002c2015-08-21 16:49:11 -07004218 # Transactional maps
4219 main.step( "Partitioned Transactional maps put" )
4220 tMapValue = "Testing"
4221 numKeys = 100
4222 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004223 node = main.activeNodes[0]
4224 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall6e709752016-02-01 13:38:46 -08004225        if putResponses and len( putResponses ) == numKeys:
Jon Hall2a5002c2015-08-21 16:49:11 -07004226 for i in putResponses:
4227 if putResponses[ i ][ 'value' ] != tMapValue:
4228 putResult = False
4229 else:
4230 putResult = False
4231 if not putResult:
4232 main.log.debug( "Put response values: " + str( putResponses ) )
4233 utilities.assert_equals( expect=True,
4234 actual=putResult,
4235 onpass="Partitioned Transactional Map put successful",
4236 onfail="Partitioned Transactional Map put values are incorrect" )
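
        # Assuming putResponses is a dict keyed by map key whose entries carry
        # the stored 'value' (the shape implied by the loop above), the same
        # validation can be written as one small helper (sketch, not invoked):
        def checkPutResponses( responses, expectedValue, expectedCount ):
            if not responses or len( responses ) != expectedCount:
                return False
            return all( responses[ k ][ 'value' ] == expectedValue
                        for k in responses )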
4237
4238 main.step( "Partitioned Transactional maps get" )
Jon Hall9bfadd22016-05-11 14:48:07 -07004239 # FIXME: is this sleep needed?
4240 time.sleep( 5 )
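        # The fixed sleep above (see the FIXME) could instead be a bounded poll
        # of one node until the written value is visible. A sketch only, not
        # invoked here; the helper name and timeout are assumptions.
        def waitForTMapValue( nodeIndex, key, expected, timeout=30 ):
            deadline = time.time() + timeout
            while time.time() < deadline:
                if main.CLIs[ nodeIndex ].transactionalMapGet( key ) == expected:
                    return True
                time.sleep( 1 )
            return False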
4241
Jon Hall2a5002c2015-08-21 16:49:11 -07004242 getCheck = True
4243 for n in range( 1, numKeys + 1 ):
4244 getResponses = []
4245 threads = []
4246 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004247 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004248 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4249 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004250 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004251 threads.append( t )
4252 t.start()
4253 for t in threads:
4254 t.join()
4255 getResponses.append( t.result )
4256            for response in getResponses:
4257                if response != tMapValue:
4258 valueCheck = False
4259 if not valueCheck:
4260 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4261 main.log.warn( getResponses )
4262 getCheck = getCheck and valueCheck
4263 utilities.assert_equals( expect=True,
4264 actual=getCheck,
4265 onpass="Partitioned Transactional Map get values were correct",
4266                                 onfail="Partitioned Transactional Map get values were incorrect" )