"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAkillNodes:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HAsanity.dependencies.Counters import Counters
            main.Counters = Counters()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )

        main.step( "Make sure ONOS service doesn't automatically respawn" )
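        # NOTE: commenting out 'respawn' in the packaged onos.conf init config keeps
        #       killed ONOS processes from being restarted automatically, so nodes
        #       stay down during the failure cases; this change is reverted in the
        #       "Clean up ONOS service changes" step later in this case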
        handle = main.ONOSbench.handle
        handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
        handle.expect( "\$" )  # $ from the command
        handle.expect( "\$" )  # $ from the prompt

        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAkillNodes"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Clean up ONOS service changes" )
        handle.sendline( "git checkout -- tools/package/init/onos.conf" )
        handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodesOutput = []
        nodeResults = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].nodes,
                             name="nodes-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
        ips.sort()
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = main.FALSE
                for node in current:
                    if node['state'] == 'READY':
                        activeIps.append( node['ip'] )
                activeIps.sort()
                if ips == activeIps:
                    currentResult = main.TRUE
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = main.FALSE
            nodeResults = nodeResults and currentResult
        utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for cli in main.CLIs:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                                "and check that an ONOS node becomes the " +\
                                "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
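            # Intended mapping for a 7 node cluster (see the branches below):
            #   s1, s28 -> ONOS1;  s2, s3 -> ONOS2;  s5, s6 -> ONOS3;  s4 -> ONOS4;
            #   s8-s17 -> ONOS5;  s7 -> ONOS6;  s18-s27 -> ONOS7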
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
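        # NOTE: this bound assumes anti-entropy needs at most one gossip period
        #       per active node, so the allowed time scales with cluster size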
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"
        onosCli = main.CLIs[ main.activeNodes[0] ]

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
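        # Poll the intent states for up to ~40 seconds (40 iterations, 1 second
        # apart) before declaring the INSTALLED check failed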
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="Intent partitions are in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        node = main.activeNodes[0]
        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...         ...         ...
            # ...         ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

1465 main.step( "Get the flows from each controller" )
1466 global flowState
1467 flowState = []
1468 ONOSFlows = []
1469 ONOSFlowsJson = []
1470 flowCheck = main.FALSE
1471 consistentFlows = True
1472 flowsResults = True
1473 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001474 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001475 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001476 name="flows-" + str( i ),
1477 args=[],
1478 kwargs={ 'jsonFormat': True } )
1479 threads.append( t )
1480 t.start()
1481
1482 # NOTE: Flows command can take some time to run
1483         time.sleep( 30 )
1484 for t in threads:
1485 t.join()
1486 result = t.result
1487 ONOSFlows.append( result )
1488
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001489 for i in range( len( ONOSFlows ) ):
1490 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001491 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1492 main.log.error( "Error in getting ONOS" + num + " flows" )
1493 main.log.warn( "ONOS" + num + " flows response: " +
1494 repr( ONOSFlows[ i ] ) )
1495 flowsResults = False
1496 ONOSFlowsJson.append( None )
1497 else:
1498 try:
1499 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1500 except ( ValueError, TypeError ):
1501 # FIXME: change this to log.error?
1502 main.log.exception( "Error in parsing ONOS" + num +
1503 " response as json." )
1504 main.log.error( repr( ONOSFlows[ i ] ) )
1505 ONOSFlowsJson.append( None )
1506 flowsResults = False
1507 utilities.assert_equals(
1508 expect=True,
1509 actual=flowsResults,
1510 onpass="No error in reading flows output",
1511 onfail="Error in reading flows from ONOS" )
1512
1513 main.step( "Check for consistency in Flows from each controller" )
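        # NOTE: consistency here is judged only by the number of flows reported
        #       by each node, not by comparing the individual flow entries.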
1514 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1515 if all( tmp ):
1516 main.log.info( "Flow count is consistent across all ONOS nodes" )
1517 else:
1518 consistentFlows = False
1519 utilities.assert_equals(
1520 expect=True,
1521 actual=consistentFlows,
1522 onpass="The flow count is consistent across all ONOS nodes",
1523 onfail="ONOS nodes have different flow counts" )
1524
1525 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001526 for i in range( len( ONOSFlows ) ):
1527 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001528 try:
1529 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001530 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001531 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1532 indent=4, separators=( ',', ': ' ) ) )
1533 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001534 main.log.warn( "ONOS" + node + " flows: " +
1535 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001536 elif flowsResults and consistentFlows:
1537 flowCheck = main.TRUE
1538 flowState = ONOSFlows[ 0 ]
1539
1540 main.step( "Get the OF Table entries" )
1541 global flows
1542 flows = []
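        # Snapshot the OpenFlow tables of s1-s28 so that CASE7 can compare them
        # against the tables seen after the node failure.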
1543 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001544 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001545 if flowCheck == main.FALSE:
1546 for table in flows:
1547 main.log.warn( table )
1548 # TODO: Compare switch flow tables with ONOS flow tables
1549
1550 main.step( "Start continuous pings" )
1551 main.Mininet2.pingLong(
1552 src=main.params[ 'PING' ][ 'source1' ],
1553 target=main.params[ 'PING' ][ 'target1' ],
1554 pingTime=500 )
1555 main.Mininet2.pingLong(
1556 src=main.params[ 'PING' ][ 'source2' ],
1557 target=main.params[ 'PING' ][ 'target2' ],
1558 pingTime=500 )
1559 main.Mininet2.pingLong(
1560 src=main.params[ 'PING' ][ 'source3' ],
1561 target=main.params[ 'PING' ][ 'target3' ],
1562 pingTime=500 )
1563 main.Mininet2.pingLong(
1564 src=main.params[ 'PING' ][ 'source4' ],
1565 target=main.params[ 'PING' ][ 'target4' ],
1566 pingTime=500 )
1567 main.Mininet2.pingLong(
1568 src=main.params[ 'PING' ][ 'source5' ],
1569 target=main.params[ 'PING' ][ 'target5' ],
1570 pingTime=500 )
1571 main.Mininet2.pingLong(
1572 src=main.params[ 'PING' ][ 'source6' ],
1573 target=main.params[ 'PING' ][ 'target6' ],
1574 pingTime=500 )
1575 main.Mininet2.pingLong(
1576 src=main.params[ 'PING' ][ 'source7' ],
1577 target=main.params[ 'PING' ][ 'target7' ],
1578 pingTime=500 )
1579 main.Mininet2.pingLong(
1580 src=main.params[ 'PING' ][ 'source8' ],
1581 target=main.params[ 'PING' ][ 'target8' ],
1582 pingTime=500 )
1583 main.Mininet2.pingLong(
1584 src=main.params[ 'PING' ][ 'source9' ],
1585 target=main.params[ 'PING' ][ 'target9' ],
1586 pingTime=500 )
1587 main.Mininet2.pingLong(
1588 src=main.params[ 'PING' ][ 'source10' ],
1589 target=main.params[ 'PING' ][ 'target10' ],
1590 pingTime=500 )
1591
1592 main.step( "Collecting topology information from ONOS" )
1593 devices = []
1594 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001595 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001596 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001597 name="devices-" + str( i ),
1598 args=[ ] )
1599 threads.append( t )
1600 t.start()
1601
1602 for t in threads:
1603 t.join()
1604 devices.append( t.result )
1605 hosts = []
1606 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001607 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001608 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001609 name="hosts-" + str( i ),
1610 args=[ ] )
1611 threads.append( t )
1612 t.start()
1613
1614 for t in threads:
1615 t.join()
1616 try:
1617 hosts.append( json.loads( t.result ) )
1618 except ( ValueError, TypeError ):
1619 # FIXME: better handling of this, print which node
1620 # Maybe use thread name?
1621 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001622 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001623 hosts.append( None )
1624
1625 ports = []
1626 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001627 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001628 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001629 name="ports-" + str( i ),
1630 args=[ ] )
1631 threads.append( t )
1632 t.start()
1633
1634 for t in threads:
1635 t.join()
1636 ports.append( t.result )
1637 links = []
1638 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001639 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001640 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001641 name="links-" + str( i ),
1642 args=[ ] )
1643 threads.append( t )
1644 t.start()
1645
1646 for t in threads:
1647 t.join()
1648 links.append( t.result )
1649 clusters = []
1650 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001651 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001652 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001653 name="clusters-" + str( i ),
1654 args=[ ] )
1655 threads.append( t )
1656 t.start()
1657
1658 for t in threads:
1659 t.join()
1660 clusters.append( t.result )
1661 # Compare json objects for hosts and dataplane clusters
1662
1663 # hosts
1664 main.step( "Host view is consistent across ONOS nodes" )
1665 consistentHostsResult = main.TRUE
1666 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001667 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001668 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001669 if hosts[ controller ] == hosts[ 0 ]:
1670 continue
1671 else: # hosts not consistent
1672 main.log.error( "hosts from ONOS" +
1673 controllerStr +
1674                                      " are inconsistent with ONOS1" )
1675 main.log.warn( repr( hosts[ controller ] ) )
1676 consistentHostsResult = main.FALSE
1677
1678 else:
1679 main.log.error( "Error in getting ONOS hosts from ONOS" +
1680 controllerStr )
1681 consistentHostsResult = main.FALSE
1682 main.log.warn( "ONOS" + controllerStr +
1683 " hosts response: " +
1684 repr( hosts[ controller ] ) )
1685 utilities.assert_equals(
1686 expect=main.TRUE,
1687 actual=consistentHostsResult,
1688 onpass="Hosts view is consistent across all ONOS nodes",
1689 onfail="ONOS nodes have different views of hosts" )
1690
1691 main.step( "Each host has an IP address" )
1692 ipResult = main.TRUE
1693 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001694 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001695 if hosts[ controller ]:
1696 for host in hosts[ controller ]:
1697 if not host.get( 'ipAddresses', [ ] ):
1698 main.log.error( "Error with host ips on controller" +
1699 controllerStr + ": " + str( host ) )
1700 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001701 utilities.assert_equals(
1702 expect=main.TRUE,
1703 actual=ipResult,
1704             onpass="The IPs of the hosts aren't empty",
1705             onfail="The IP of at least one host is missing" )
1706
1707 # Strongly connected clusters of devices
1708 main.step( "Cluster view is consistent across ONOS nodes" )
1709 consistentClustersResult = main.TRUE
1710 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001711 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001712 if "Error" not in clusters[ controller ]:
1713 if clusters[ controller ] == clusters[ 0 ]:
1714 continue
1715 else: # clusters not consistent
1716 main.log.error( "clusters from ONOS" + controllerStr +
1717                                      " are inconsistent with ONOS1" )
1718 consistentClustersResult = main.FALSE
1719
1720 else:
1721 main.log.error( "Error in getting dataplane clusters " +
1722 "from ONOS" + controllerStr )
1723 consistentClustersResult = main.FALSE
1724 main.log.warn( "ONOS" + controllerStr +
1725 " clusters response: " +
1726 repr( clusters[ controller ] ) )
1727 utilities.assert_equals(
1728 expect=main.TRUE,
1729 actual=consistentClustersResult,
1730 onpass="Clusters view is consistent across all ONOS nodes",
1731 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001732 if consistentClustersResult != main.TRUE:
1733 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07001734 # there should always only be one cluster
1735 main.step( "Cluster view correct across ONOS nodes" )
1736 try:
1737 numClusters = len( json.loads( clusters[ 0 ] ) )
1738 except ( ValueError, TypeError ):
1739 main.log.exception( "Error parsing clusters[0]: " +
1740 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001741 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001742 clusterResults = main.FALSE
1743 if numClusters == 1:
1744 clusterResults = main.TRUE
1745 utilities.assert_equals(
1746 expect=1,
1747 actual=numClusters,
1748 onpass="ONOS shows 1 SCC",
1749 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1750
1751 main.step( "Comparing ONOS topology to MN" )
1752 devicesResults = main.TRUE
1753 linksResults = main.TRUE
1754 hostsResults = main.TRUE
1755 mnSwitches = main.Mininet1.getSwitches()
1756 mnLinks = main.Mininet1.getLinks()
1757 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001758         for controller in range( len( main.activeNodes ) ):
1759 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001760 if devices[ controller ] and ports[ controller ] and\
1761 "Error" not in devices[ controller ] and\
1762 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001763 currentDevicesResult = main.Mininet1.compareSwitches(
1764 mnSwitches,
1765 json.loads( devices[ controller ] ),
1766 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001767 else:
1768 currentDevicesResult = main.FALSE
1769 utilities.assert_equals( expect=main.TRUE,
1770 actual=currentDevicesResult,
1771 onpass="ONOS" + controllerStr +
1772 " Switches view is correct",
1773 onfail="ONOS" + controllerStr +
1774 " Switches view is incorrect" )
1775 if links[ controller ] and "Error" not in links[ controller ]:
1776 currentLinksResult = main.Mininet1.compareLinks(
1777 mnSwitches, mnLinks,
1778 json.loads( links[ controller ] ) )
1779 else:
1780 currentLinksResult = main.FALSE
1781 utilities.assert_equals( expect=main.TRUE,
1782 actual=currentLinksResult,
1783 onpass="ONOS" + controllerStr +
1784 " links view is correct",
1785 onfail="ONOS" + controllerStr +
1786 " links view is incorrect" )
1787
Jon Hall657cdf62015-12-17 14:40:51 -08001788 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001789 currentHostsResult = main.Mininet1.compareHosts(
1790 mnHosts,
1791 hosts[ controller ] )
1792 else:
1793 currentHostsResult = main.FALSE
1794 utilities.assert_equals( expect=main.TRUE,
1795 actual=currentHostsResult,
1796 onpass="ONOS" + controllerStr +
1797 " hosts exist in Mininet",
1798 onfail="ONOS" + controllerStr +
1799 " hosts don't match Mininet" )
1800
1801 devicesResults = devicesResults and currentDevicesResult
1802 linksResults = linksResults and currentLinksResult
1803 hostsResults = hostsResults and currentHostsResult
1804
1805 main.step( "Device information is correct" )
1806 utilities.assert_equals(
1807 expect=main.TRUE,
1808 actual=devicesResults,
1809 onpass="Device information is correct",
1810 onfail="Device information is incorrect" )
1811
1812 main.step( "Links are correct" )
1813 utilities.assert_equals(
1814 expect=main.TRUE,
1815 actual=linksResults,
1816             onpass="Links are correct",
1817 onfail="Links are incorrect" )
1818
1819 main.step( "Hosts are correct" )
1820 utilities.assert_equals(
1821 expect=main.TRUE,
1822 actual=hostsResults,
1823 onpass="Hosts are correct",
1824 onfail="Hosts are incorrect" )
1825
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001826 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001827 """
1828         The failure case: kill a minority of ONOS nodes.
1829 """
Jon Halle1a3b752015-07-22 13:02:46 -07001830 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001831 assert main, "main not defined"
1832 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001833 assert main.CLIs, "main.CLIs not defined"
1834 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001835 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001836
1837 main.step( "Checking ONOS Logs for errors" )
1838 for node in main.nodes:
1839 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1840 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1841
Jon Hall3b489db2015-10-05 14:38:37 -07001842 n = len( main.nodes ) # Number of nodes
1843 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1844 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1845 if n > 3:
1846 main.kill.append( p - 1 )
1847         # NOTE: This only works for cluster sizes of 3, 5, or 7.
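        # Example: with n=7 nodes p=5, so main.kill=[ 0, 4 ] and two of the
        # seven nodes are killed; with n=3 only node index 0 is killed. In each
        # case a majority of nodes stays up, so the cluster should keep quorum.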
1848
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001849 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001850 killResults = main.TRUE
1851 for i in main.kill:
1852 killResults = killResults and\
1853 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001854 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001855 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001856 onpass="ONOS nodes killed successfully",
1857 onfail="ONOS nodes NOT successfully killed" )
1858
1859 def CASE62( self, main ):
1860 """
1861         Bring the stopped ONOS nodes back up
1862 """
1863 import time
1864 assert main.numCtrls, "main.numCtrls not defined"
1865 assert main, "main not defined"
1866 assert utilities.assert_equals, "utilities.assert_equals not defined"
1867 assert main.CLIs, "main.CLIs not defined"
1868 assert main.nodes, "main.nodes not defined"
1869 assert main.kill, "main.kill not defined"
1870 main.case( "Restart minority of ONOS nodes" )
1871
1872 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1873 startResults = main.TRUE
1874 restartTime = time.time()
1875 for i in main.kill:
1876 startResults = startResults and\
1877 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1878 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1879 onpass="ONOS nodes started successfully",
1880 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001881
1882 main.step( "Checking if ONOS is up yet" )
1883 count = 0
1884 onosIsupResult = main.FALSE
1885 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001886 onosIsupResult = main.TRUE
1887 for i in main.kill:
1888 onosIsupResult = onosIsupResult and\
1889 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001890 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001891 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1892 onpass="ONOS restarted successfully",
1893 onfail="ONOS restart NOT successful" )
1894
Jon Halle1a3b752015-07-22 13:02:46 -07001895 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001896 cliResults = main.TRUE
1897 for i in main.kill:
1898 cliResults = cliResults and\
1899 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001900 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001901 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1902 onpass="ONOS cli restarted",
1903 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001904 main.activeNodes.sort()
1905 try:
1906 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1907                 "List of active nodes has duplicates; this likely indicates something was run out of order"
1908 except AssertionError:
1909 main.log.exception( "" )
1910 main.cleanup()
1911 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001912
1913         # Grab the time of restart so we can check how long the gossip
1914 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001915 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001916 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001917         # TODO: Make this configurable. Also, we are breaking the above timer
1918 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001919 node = main.activeNodes[0]
1920 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1921 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1922 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001923
Jon Halla440e872016-03-31 15:15:50 -07001924 main.step( "Rerun for election on the node(s) that were killed" )
1925 runResults = main.TRUE
1926 for i in main.kill:
1927 runResults = runResults and\
1928 main.CLIs[i].electionTestRun()
1929 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1930 onpass="ONOS nodes reran for election topic",
1931                                  onfail="Error rerunning for election" )
1932
Jon Hall5cf14d52015-07-16 12:15:19 -07001933 def CASE7( self, main ):
1934 """
1935 Check state after ONOS failure
1936 """
1937 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001938 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001939 assert main, "main not defined"
1940 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001941 assert main.CLIs, "main.CLIs not defined"
1942 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001943 try:
1944 main.kill
1945 except AttributeError:
1946 main.kill = []
1947
Jon Hall5cf14d52015-07-16 12:15:19 -07001948 main.case( "Running ONOS Constant State Tests" )
1949
1950 main.step( "Check that each switch has a master" )
1951 # Assert that each device has a master
1952 rolesNotNull = main.TRUE
1953 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001954 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001955 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001956 name="rolesNotNull-" + str( i ),
1957 args=[ ] )
1958 threads.append( t )
1959 t.start()
1960
1961 for t in threads:
1962 t.join()
1963 rolesNotNull = rolesNotNull and t.result
1964 utilities.assert_equals(
1965 expect=main.TRUE,
1966 actual=rolesNotNull,
1967 onpass="Each device has a master",
1968 onfail="Some devices don't have a master assigned" )
1969
1970 main.step( "Read device roles from ONOS" )
1971 ONOSMastership = []
Jon Halla440e872016-03-31 15:15:50 -07001972 mastershipCheck = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001973 consistentMastership = True
1974 rolesResults = True
1975 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001976 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001977 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001978 name="roles-" + str( i ),
1979 args=[] )
1980 threads.append( t )
1981 t.start()
1982
1983 for t in threads:
1984 t.join()
1985 ONOSMastership.append( t.result )
1986
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001987 for i in range( len( ONOSMastership ) ):
1988 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001989 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001990 main.log.error( "Error in getting ONOS" + node + " roles" )
1991 main.log.warn( "ONOS" + node + " mastership response: " +
1992 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001993 rolesResults = False
1994 utilities.assert_equals(
1995 expect=True,
1996 actual=rolesResults,
1997 onpass="No error in reading roles output",
1998 onfail="Error in reading roles from ONOS" )
1999
2000 main.step( "Check for consistency in roles from each controller" )
2001 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
2002 main.log.info(
2003 "Switch roles are consistent across all ONOS nodes" )
2004 else:
2005 consistentMastership = False
2006 utilities.assert_equals(
2007 expect=True,
2008 actual=consistentMastership,
2009 onpass="Switch roles are consistent across all ONOS nodes",
2010 onfail="ONOS nodes have different views of switch roles" )
2011
2012 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002013 for i in range( len( ONOSMastership ) ):
2014 node = str( main.activeNodes[i] + 1 )
2015 main.log.warn( "ONOS" + node + " roles: ",
2016 json.dumps( json.loads( ONOSMastership[ i ] ),
2017 sort_keys=True,
2018 indent=4,
2019 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07002020 elif rolesResults and consistentMastership:
2021 mastershipCheck = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002022
2023 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07002024
2025 main.step( "Get the intents and compare across all nodes" )
2026 ONOSIntents = []
2027 intentCheck = main.FALSE
2028 consistentIntents = True
2029 intentsResults = True
2030 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002031 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002032 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07002033 name="intents-" + str( i ),
2034 args=[],
2035 kwargs={ 'jsonFormat': True } )
2036 threads.append( t )
2037 t.start()
2038
2039 for t in threads:
2040 t.join()
2041 ONOSIntents.append( t.result )
2042
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002043         for i in range( len( ONOSIntents ) ):
2044 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002045 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002046 main.log.error( "Error in getting ONOS" + node + " intents" )
2047 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07002048 repr( ONOSIntents[ i ] ) )
2049 intentsResults = False
2050 utilities.assert_equals(
2051 expect=True,
2052 actual=intentsResults,
2053 onpass="No error in reading intents output",
2054 onfail="Error in reading intents from ONOS" )
2055
2056 main.step( "Check for consistency in Intents from each controller" )
2057 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2058 main.log.info( "Intents are consistent across all ONOS " +
2059 "nodes" )
2060 else:
2061 consistentIntents = False
2062
2063 # Try to make it easy to figure out what is happening
2064 #
2065 # Intent ONOS1 ONOS2 ...
2066 # 0x01 INSTALLED INSTALLING
2067 # ... ... ...
2068 # ... ... ...
2069 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002070 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07002071 title += " " * 10 + "ONOS" + str( n + 1 )
2072 main.log.warn( title )
2073 # get all intent keys in the cluster
2074 keys = []
2075 for nodeStr in ONOSIntents:
2076 node = json.loads( nodeStr )
2077 for intent in node:
2078 keys.append( intent.get( 'id' ) )
2079 keys = set( keys )
2080 for key in keys:
2081 row = "%-13s" % key
2082 for nodeStr in ONOSIntents:
2083 node = json.loads( nodeStr )
2084 for intent in node:
2085 if intent.get( 'id' ) == key:
2086 row += "%-15s" % intent.get( 'state' )
2087 main.log.warn( row )
2088 # End table view
2089
2090 utilities.assert_equals(
2091 expect=True,
2092 actual=consistentIntents,
2093 onpass="Intents are consistent across all ONOS nodes",
2094 onfail="ONOS nodes have different views of intents" )
2095 intentStates = []
2096 for node in ONOSIntents: # Iter through ONOS nodes
2097 nodeStates = []
2098 # Iter through intents of a node
2099 try:
2100 for intent in json.loads( node ):
2101 nodeStates.append( intent[ 'state' ] )
2102 except ( ValueError, TypeError ):
2103 main.log.exception( "Error in parsing intents" )
2104 main.log.error( repr( node ) )
2105 intentStates.append( nodeStates )
2106 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2107 main.log.info( dict( out ) )
2108
2109 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002110 for i in range( len( main.activeNodes ) ):
2111 node = str( main.activeNodes[i] + 1 )
2112 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07002113 main.log.warn( json.dumps(
2114 json.loads( ONOSIntents[ i ] ),
2115 sort_keys=True,
2116 indent=4,
2117 separators=( ',', ': ' ) ) )
2118 elif intentsResults and consistentIntents:
2119 intentCheck = main.TRUE
2120
2121 # NOTE: Store has no durability, so intents are lost across system
2122 # restarts
2123 main.step( "Compare current intents with intents before the failure" )
2124 # NOTE: this requires case 5 to pass for intentState to be set.
2125 # maybe we should stop the test if that fails?
2126 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002127 try:
2128 intentState
2129 except NameError:
2130 main.log.warn( "No previous intent state was saved" )
2131 else:
2132 if intentState and intentState == ONOSIntents[ 0 ]:
2133 sameIntents = main.TRUE
2134 main.log.info( "Intents are consistent with before failure" )
2135 # TODO: possibly the states have changed? we may need to figure out
2136 # what the acceptable states are
2137 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2138 sameIntents = main.TRUE
2139 try:
2140 before = json.loads( intentState )
2141 after = json.loads( ONOSIntents[ 0 ] )
2142 for intent in before:
2143 if intent not in after:
2144 sameIntents = main.FALSE
2145 main.log.debug( "Intent is not currently in ONOS " +
2146 "(at least in the same form):" )
2147 main.log.debug( json.dumps( intent ) )
2148 except ( ValueError, TypeError ):
2149 main.log.exception( "Exception printing intents" )
2150 main.log.debug( repr( ONOSIntents[0] ) )
2151 main.log.debug( repr( intentState ) )
2152 if sameIntents == main.FALSE:
2153 try:
2154 main.log.debug( "ONOS intents before: " )
2155 main.log.debug( json.dumps( json.loads( intentState ),
2156 sort_keys=True, indent=4,
2157 separators=( ',', ': ' ) ) )
2158 main.log.debug( "Current ONOS intents: " )
2159 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2160 sort_keys=True, indent=4,
2161 separators=( ',', ': ' ) ) )
2162 except ( ValueError, TypeError ):
2163 main.log.exception( "Exception printing intents" )
2164 main.log.debug( repr( ONOSIntents[0] ) )
2165 main.log.debug( repr( intentState ) )
2166 utilities.assert_equals(
2167 expect=main.TRUE,
2168 actual=sameIntents,
2169 onpass="Intents are consistent with before failure",
2170 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002171 intentCheck = intentCheck and sameIntents
2172
2173 main.step( "Get the OF Table entries and compare to before " +
2174 "component failure" )
2175 FlowTables = main.TRUE
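        # Compare each switch's current flow table against the snapshot taken
        # in CASE5; the flows should be unchanged since only the control plane
        # was disturbed.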
Jon Hall5cf14d52015-07-16 12:15:19 -07002176 for i in range( 28 ):
2177 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002178 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2179 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
Jon Hall5cf14d52015-07-16 12:15:19 -07002180 if FlowTables == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002181 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002182 utilities.assert_equals(
2183 expect=main.TRUE,
2184 actual=FlowTables,
2185 onpass="No changes were found in the flow tables",
2186 onfail="Changes were found in the flow tables" )
2187
2188 main.Mininet2.pingLongKill()
2189 '''
2190 main.step( "Check the continuous pings to ensure that no packets " +
2191 "were dropped during component failure" )
2192 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2193 main.params[ 'TESTONIP' ] )
2194 LossInPings = main.FALSE
2195 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2196 for i in range( 8, 18 ):
2197 main.log.info(
2198 "Checking for a loss in pings along flow from s" +
2199 str( i ) )
2200 LossInPings = main.Mininet2.checkForLoss(
2201 "/tmp/ping.h" +
2202 str( i ) ) or LossInPings
2203 if LossInPings == main.TRUE:
2204 main.log.info( "Loss in ping detected" )
2205 elif LossInPings == main.ERROR:
2206 main.log.info( "There are multiple mininet process running" )
2207 elif LossInPings == main.FALSE:
2208 main.log.info( "No Loss in the pings" )
2209 main.log.info( "No loss of dataplane connectivity" )
2210 utilities.assert_equals(
2211 expect=main.FALSE,
2212 actual=LossInPings,
2213 onpass="No Loss of connectivity",
2214 onfail="Loss of dataplane connectivity detected" )
2215 '''
2216
2217 main.step( "Leadership Election is still functional" )
2218 # Test of LeadershipElection
2219 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002220
Jon Hall3b489db2015-10-05 14:38:37 -07002221 restarted = []
2222 for i in main.kill:
2223 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002224 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002225
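        # A valid leader must be reported consistently by every active node
        # and must not be one of the nodes that was just killed and restarted.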
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002226 for i in main.activeNodes:
2227 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002228 leaderN = cli.electionTestLeader()
2229 leaderList.append( leaderN )
2230 if leaderN == main.FALSE:
2231 # error in response
2232 main.log.error( "Something is wrong with " +
2233 "electionTestLeader function, check the" +
2234 " error logs" )
2235 leaderResult = main.FALSE
2236 elif leaderN is None:
2237 main.log.error( cli.name +
2238                                  " shows that no leader for the election-app was" +
2239 " elected after the old one died" )
2240 leaderResult = main.FALSE
2241 elif leaderN in restarted:
2242 main.log.error( cli.name + " shows " + str( leaderN ) +
2243 " as leader for the election-app, but it " +
2244 "was restarted" )
2245 leaderResult = main.FALSE
2246 if len( set( leaderList ) ) != 1:
2247 leaderResult = main.FALSE
2248 main.log.error(
2249 "Inconsistent view of leader for the election test app" )
2250 # TODO: print the list
2251 utilities.assert_equals(
2252 expect=main.TRUE,
2253 actual=leaderResult,
2254 onpass="Leadership election passed",
2255 onfail="Something went wrong with Leadership election" )
2256
2257 def CASE8( self, main ):
2258 """
2259 Compare topo
2260 """
2261 import json
2262 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002263 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002264 assert main, "main not defined"
2265 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002266 assert main.CLIs, "main.CLIs not defined"
2267 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002268
2269 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002270 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002271 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002272 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002273 topoFailMsg = "ONOS topology don't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002274 elapsed = 0
2275 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002276 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002277 startTime = time.time()
2278 # Give time for Gossip to work
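        # Poll ONOS until its topology matches Mininet; the loop exits early on
        # a match and otherwise keeps retrying while under 60 seconds of
        # elapsed time or fewer than 3 attempts.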
Jon Halle9b1fa32015-12-08 15:32:21 -08002279 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002280 devicesResults = main.TRUE
2281 linksResults = main.TRUE
2282 hostsResults = main.TRUE
2283 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002284 count += 1
2285 cliStart = time.time()
2286 devices = []
2287 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002288 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002289 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002290 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002291 args=[ main.CLIs[i].devices, [ None ] ],
2292 kwargs= { 'sleep': 5, 'attempts': 5,
2293 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002294 threads.append( t )
2295 t.start()
2296
2297 for t in threads:
2298 t.join()
2299 devices.append( t.result )
2300 hosts = []
2301 ipResult = main.TRUE
2302 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002303 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002304 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002305 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002306 args=[ main.CLIs[i].hosts, [ None ] ],
2307 kwargs= { 'sleep': 5, 'attempts': 5,
2308 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002309 threads.append( t )
2310 t.start()
2311
2312 for t in threads:
2313 t.join()
2314 try:
2315 hosts.append( json.loads( t.result ) )
2316 except ( ValueError, TypeError ):
2317 main.log.exception( "Error parsing hosts results" )
2318 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002319 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002320 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002321 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002322 if hosts[ controller ]:
2323 for host in hosts[ controller ]:
2324 if host is None or host.get( 'ipAddresses', [] ) == []:
2325 main.log.error(
2326 "Error with host ipAddresses on controller" +
2327 controllerStr + ": " + str( host ) )
2328 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002329 ports = []
2330 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002331 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002332 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002333 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002334 args=[ main.CLIs[i].ports, [ None ] ],
2335 kwargs= { 'sleep': 5, 'attempts': 5,
2336 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002337 threads.append( t )
2338 t.start()
2339
2340 for t in threads:
2341 t.join()
2342 ports.append( t.result )
2343 links = []
2344 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002345 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002346 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002347 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002348 args=[ main.CLIs[i].links, [ None ] ],
2349 kwargs= { 'sleep': 5, 'attempts': 5,
2350 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002351 threads.append( t )
2352 t.start()
2353
2354 for t in threads:
2355 t.join()
2356 links.append( t.result )
2357 clusters = []
2358 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002359 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002360 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002361 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002362 args=[ main.CLIs[i].clusters, [ None ] ],
2363 kwargs= { 'sleep': 5, 'attempts': 5,
2364 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002365 threads.append( t )
2366 t.start()
2367
2368 for t in threads:
2369 t.join()
2370 clusters.append( t.result )
2371
2372 elapsed = time.time() - startTime
2373 cliTime = time.time() - cliStart
2374 print "Elapsed time: " + str( elapsed )
2375 print "CLI time: " + str( cliTime )
2376
Jon Hall6e709752016-02-01 13:38:46 -08002377 if all( e is None for e in devices ) and\
2378 all( e is None for e in hosts ) and\
2379 all( e is None for e in ports ) and\
2380 all( e is None for e in links ) and\
2381 all( e is None for e in clusters ):
2382 topoFailMsg = "Could not get topology from ONOS"
2383 main.log.error( topoFailMsg )
2384 continue # Try again, No use trying to compare
2385
Jon Hall5cf14d52015-07-16 12:15:19 -07002386 mnSwitches = main.Mininet1.getSwitches()
2387 mnLinks = main.Mininet1.getLinks()
2388 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002389 for controller in range( len( main.activeNodes ) ):
2390 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002391 if devices[ controller ] and ports[ controller ] and\
2392 "Error" not in devices[ controller ] and\
2393 "Error" not in ports[ controller ]:
2394
Jon Hallc6793552016-01-19 14:18:37 -08002395 try:
2396 currentDevicesResult = main.Mininet1.compareSwitches(
2397 mnSwitches,
2398 json.loads( devices[ controller ] ),
2399 json.loads( ports[ controller ] ) )
2400 except ( TypeError, ValueError ) as e:
2401 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2402 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002403 else:
2404 currentDevicesResult = main.FALSE
2405 utilities.assert_equals( expect=main.TRUE,
2406 actual=currentDevicesResult,
2407 onpass="ONOS" + controllerStr +
2408 " Switches view is correct",
2409 onfail="ONOS" + controllerStr +
2410 " Switches view is incorrect" )
2411
2412 if links[ controller ] and "Error" not in links[ controller ]:
2413 currentLinksResult = main.Mininet1.compareLinks(
2414 mnSwitches, mnLinks,
2415 json.loads( links[ controller ] ) )
2416 else:
2417 currentLinksResult = main.FALSE
2418 utilities.assert_equals( expect=main.TRUE,
2419 actual=currentLinksResult,
2420 onpass="ONOS" + controllerStr +
2421 " links view is correct",
2422 onfail="ONOS" + controllerStr +
2423 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002424 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002425 currentHostsResult = main.Mininet1.compareHosts(
2426 mnHosts,
2427 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002428 elif hosts[ controller ] == []:
2429 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002430 else:
2431 currentHostsResult = main.FALSE
2432 utilities.assert_equals( expect=main.TRUE,
2433 actual=currentHostsResult,
2434 onpass="ONOS" + controllerStr +
2435 " hosts exist in Mininet",
2436 onfail="ONOS" + controllerStr +
2437 " hosts don't match Mininet" )
2438 # CHECKING HOST ATTACHMENT POINTS
2439 hostAttachment = True
2440 zeroHosts = False
2441 # FIXME: topo-HA/obelisk specific mappings:
2442 # key is mac and value is dpid
2443 mappings = {}
2444 for i in range( 1, 29 ): # hosts 1 through 28
2445 # set up correct variables:
2446 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2447 if i == 1:
2448 deviceId = "1000".zfill(16)
2449 elif i == 2:
2450 deviceId = "2000".zfill(16)
2451 elif i == 3:
2452 deviceId = "3000".zfill(16)
2453 elif i == 4:
2454 deviceId = "3004".zfill(16)
2455 elif i == 5:
2456 deviceId = "5000".zfill(16)
2457 elif i == 6:
2458 deviceId = "6000".zfill(16)
2459 elif i == 7:
2460 deviceId = "6007".zfill(16)
2461 elif i >= 8 and i <= 17:
2462 dpid = '3' + str( i ).zfill( 3 )
2463 deviceId = dpid.zfill(16)
2464 elif i >= 18 and i <= 27:
2465 dpid = '6' + str( i ).zfill( 3 )
2466 deviceId = dpid.zfill(16)
2467 elif i == 28:
2468 deviceId = "2800".zfill(16)
2469 mappings[ macId ] = deviceId
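                # e.g. host 8 has MAC 00:00:00:00:00:08 and should attach to
                # device 0000000000003008; every host is expected on port 1 of
                # its mapped switch in this topology.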
Jon Halld8f6de82015-12-17 17:04:34 -08002470 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002471 if hosts[ controller ] == []:
2472 main.log.warn( "There are no hosts discovered" )
2473 zeroHosts = True
2474 else:
2475 for host in hosts[ controller ]:
2476 mac = None
2477 location = None
2478 device = None
2479 port = None
2480 try:
2481 mac = host.get( 'mac' )
2482 assert mac, "mac field could not be found for this host object"
2483
2484 location = host.get( 'location' )
2485 assert location, "location field could not be found for this host object"
2486
2487 # Trim the protocol identifier off deviceId
2488 device = str( location.get( 'elementId' ) ).split(':')[1]
2489 assert device, "elementId field could not be found for this host location object"
2490
2491 port = location.get( 'port' )
2492 assert port, "port field could not be found for this host location object"
2493
2494 # Now check if this matches where they should be
2495 if mac and device and port:
2496 if str( port ) != "1":
2497 main.log.error( "The attachment port is incorrect for " +
2498 "host " + str( mac ) +
2499                                              ". Expected: 1 Actual: " + str( port ) )
2500 hostAttachment = False
2501 if device != mappings[ str( mac ) ]:
2502 main.log.error( "The attachment device is incorrect for " +
2503 "host " + str( mac ) +
2504 ". Expected: " + mappings[ str( mac ) ] +
2505 " Actual: " + device )
2506 hostAttachment = False
2507 else:
2508 hostAttachment = False
2509 except AssertionError:
2510 main.log.exception( "Json object not as expected" )
2511 main.log.error( repr( host ) )
2512 hostAttachment = False
2513 else:
2514 main.log.error( "No hosts json output or \"Error\"" +
2515 " in output. hosts = " +
2516 repr( hosts[ controller ] ) )
2517 if zeroHosts is False:
2518 hostAttachment = True
2519
2520 # END CHECKING HOST ATTACHMENT POINTS
2521 devicesResults = devicesResults and currentDevicesResult
2522 linksResults = linksResults and currentLinksResult
2523 hostsResults = hostsResults and currentHostsResult
2524 hostAttachmentResults = hostAttachmentResults and\
2525 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002526 topoResult = ( devicesResults and linksResults
2527 and hostsResults and ipResult and
2528 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002529 utilities.assert_equals( expect=True,
2530 actual=topoResult,
2531 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002532 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002533 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002534
2535 # Compare json objects for hosts and dataplane clusters
2536
2537 # hosts
2538 main.step( "Hosts view is consistent across all ONOS nodes" )
2539 consistentHostsResult = main.TRUE
2540 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002541 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002542 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002543 if hosts[ controller ] == hosts[ 0 ]:
2544 continue
2545 else: # hosts not consistent
2546 main.log.error( "hosts from ONOS" + controllerStr +
2547                                      " are inconsistent with ONOS1" )
2548 main.log.warn( repr( hosts[ controller ] ) )
2549 consistentHostsResult = main.FALSE
2550
2551 else:
2552 main.log.error( "Error in getting ONOS hosts from ONOS" +
2553 controllerStr )
2554 consistentHostsResult = main.FALSE
2555 main.log.warn( "ONOS" + controllerStr +
2556 " hosts response: " +
2557 repr( hosts[ controller ] ) )
2558 utilities.assert_equals(
2559 expect=main.TRUE,
2560 actual=consistentHostsResult,
2561 onpass="Hosts view is consistent across all ONOS nodes",
2562 onfail="ONOS nodes have different views of hosts" )
2563
2564 main.step( "Hosts information is correct" )
2565 hostsResults = hostsResults and ipResult
2566 utilities.assert_equals(
2567 expect=main.TRUE,
2568 actual=hostsResults,
2569 onpass="Host information is correct",
2570 onfail="Host information is incorrect" )
2571
2572 main.step( "Host attachment points to the network" )
2573 utilities.assert_equals(
2574 expect=True,
2575 actual=hostAttachmentResults,
2576 onpass="Hosts are correctly attached to the network",
2577 onfail="ONOS did not correctly attach hosts to the network" )
2578
2579 # Strongly connected clusters of devices
2580 main.step( "Clusters view is consistent across all ONOS nodes" )
2581 consistentClustersResult = main.TRUE
2582 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002583 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002584 if "Error" not in clusters[ controller ]:
2585 if clusters[ controller ] == clusters[ 0 ]:
2586 continue
2587 else: # clusters not consistent
2588 main.log.error( "clusters from ONOS" +
2589 controllerStr +
2590                                      " are inconsistent with ONOS1" )
2591 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002592 else:
2593 main.log.error( "Error in getting dataplane clusters " +
2594 "from ONOS" + controllerStr )
2595 consistentClustersResult = main.FALSE
2596 main.log.warn( "ONOS" + controllerStr +
2597 " clusters response: " +
2598 repr( clusters[ controller ] ) )
2599 utilities.assert_equals(
2600 expect=main.TRUE,
2601 actual=consistentClustersResult,
2602 onpass="Clusters view is consistent across all ONOS nodes",
2603 onfail="ONOS nodes have different views of clusters" )
2604
2605 main.step( "There is only one SCC" )
2606 # there should always only be one cluster
2607 try:
2608 numClusters = len( json.loads( clusters[ 0 ] ) )
2609 except ( ValueError, TypeError ):
2610 main.log.exception( "Error parsing clusters[0]: " +
2611 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002612 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002613 clusterResults = main.FALSE
2614 if numClusters == 1:
2615 clusterResults = main.TRUE
2616 utilities.assert_equals(
2617 expect=1,
2618 actual=numClusters,
2619 onpass="ONOS shows 1 SCC",
2620 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2621
2622 topoResult = ( devicesResults and linksResults
2623 and hostsResults and consistentHostsResult
2624 and consistentClustersResult and clusterResults
2625 and ipResult and hostAttachmentResults )
2626
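        # The overall result also requires convergence within two polling
        # attempts; taking longer is treated as a failure even if the
        # topologies eventually matched.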
2627 topoResult = topoResult and int( count <= 2 )
2628 note = "note it takes about " + str( int( cliTime ) ) + \
2629 " seconds for the test to make all the cli calls to fetch " +\
2630 "the topology from each ONOS instance"
2631 main.log.info(
2632 "Very crass estimate for topology discovery/convergence( " +
2633 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2634 str( count ) + " tries" )
2635
2636 main.step( "Device information is correct" )
2637 utilities.assert_equals(
2638 expect=main.TRUE,
2639 actual=devicesResults,
2640 onpass="Device information is correct",
2641 onfail="Device information is incorrect" )
2642
2643 main.step( "Links are correct" )
2644 utilities.assert_equals(
2645 expect=main.TRUE,
2646 actual=linksResults,
2647             onpass="Links are correct",
2648 onfail="Links are incorrect" )
2649
Jon Halla440e872016-03-31 15:15:50 -07002650 main.step( "Hosts are correct" )
2651 utilities.assert_equals(
2652 expect=main.TRUE,
2653 actual=hostsResults,
2654 onpass="Hosts are correct",
2655 onfail="Hosts are incorrect" )
2656
Jon Hall5cf14d52015-07-16 12:15:19 -07002657 # FIXME: move this to an ONOS state case
2658 main.step( "Checking ONOS nodes" )
2659 nodesOutput = []
2660 nodeResults = main.TRUE
2661 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002662 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002663 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002664 name="nodes-" + str( i ),
2665 args=[ ] )
2666 threads.append( t )
2667 t.start()
2668
2669 for t in threads:
2670 t.join()
2671 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002672 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002673 ips.sort()
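        # Each active node's view of the cluster must list exactly the IPs of
        # the currently active ONOS instances, all in the READY state.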
Jon Hall5cf14d52015-07-16 12:15:19 -07002674 for i in nodesOutput:
2675 try:
2676 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002677 activeIps = []
2678 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002679 for node in current:
Jon Hallbd182782016-03-28 16:42:22 -07002680 if node['state'] == 'READY':
Jon Halle9b1fa32015-12-08 15:32:21 -08002681 activeIps.append( node['ip'] )
2682 activeIps.sort()
2683 if ips == activeIps:
2684 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002685 except ( ValueError, TypeError ):
2686 main.log.error( "Error parsing nodes output" )
2687 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002688 currentResult = main.FALSE
2689 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002690 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2691 onpass="Nodes check successful",
2692 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002693 if not nodeResults:
2694 for cli in main.CLIs:
2695 main.log.debug( "{} components not ACTIVE: \n{}".format(
2696 cli.name,
2697 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002698
2699 def CASE9( self, main ):
2700 """
2701 Link s3-s28 down
2702 """
2703 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002704 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002705 assert main, "main not defined"
2706 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002707 assert main.CLIs, "main.CLIs not defined"
2708 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002709 # NOTE: You should probably run a topology check after this
2710
2711 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2712
2713 description = "Turn off a link to ensure that Link Discovery " +\
2714 "is working properly"
2715 main.case( description )
2716
2717 main.step( "Kill Link between s3 and s28" )
2718 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2719 main.log.info( "Waiting " + str( linkSleep ) +
2720 " seconds for link down to be discovered" )
2721 time.sleep( linkSleep )
2722 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2723 onpass="Link down successful",
2724 onfail="Failed to bring link down" )
2725 # TODO do some sort of check here
2726
2727 def CASE10( self, main ):
2728 """
2729 Link s3-s28 up
2730 """
2731 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002732 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002733 assert main, "main not defined"
2734 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002735 assert main.CLIs, "main.CLIs not defined"
2736 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002737 # NOTE: You should probably run a topology check after this
2738
2739 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2740
2741 description = "Restore a link to ensure that Link Discovery is " + \
2742 "working properly"
2743 main.case( description )
2744
2745 main.step( "Bring link between s3 and s28 back up" )
2746 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2747 main.log.info( "Waiting " + str( linkSleep ) +
2748 " seconds for link up to be discovered" )
2749 time.sleep( linkSleep )
2750 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2751 onpass="Link up successful",
2752 onfail="Failed to bring link up" )
2753 # TODO do some sort of check here
2754
2755 def CASE11( self, main ):
2756 """
2757 Switch Down
2758 """
2759 # NOTE: You should probably run a topology check after this
2760 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002761 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002762 assert main, "main not defined"
2763 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002764 assert main.CLIs, "main.CLIs not defined"
2765 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002766
2767 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2768
2769 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002770 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002771 main.case( description )
2772 switch = main.params[ 'kill' ][ 'switch' ]
2773 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2774
2775 # TODO: Make this switch parameterizable
2776 main.step( "Kill " + switch )
2777 main.log.info( "Deleting " + switch )
2778 main.Mininet1.delSwitch( switch )
2779 main.log.info( "Waiting " + str( switchSleep ) +
2780 " seconds for switch down to be discovered" )
2781 time.sleep( switchSleep )
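        # After the switch is deleted from Mininet, ONOS is expected to keep the device
        # in its store but flip 'available' to False; the check below relies on that
        # rather than on the device disappearing entirely.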
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002782 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002783 # Peek at the deleted switch
2784 main.log.warn( str( device ) )
2785 result = main.FALSE
2786 if device and device[ 'available' ] is False:
2787 result = main.TRUE
2788 utilities.assert_equals( expect=main.TRUE, actual=result,
2789 onpass="Kill switch successful",
2790 onfail="Failed to kill switch?" )
2791
2792 def CASE12( self, main ):
2793 """
2794 Switch Up
2795 """
2796 # NOTE: You should probably run a topology check after this
2797 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002798 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002799 assert main, "main not defined"
2800 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002801 assert main.CLIs, "main.CLIs not defined"
2802 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002803 assert ONOS1Port, "ONOS1Port not defined"
2804 assert ONOS2Port, "ONOS2Port not defined"
2805 assert ONOS3Port, "ONOS3Port not defined"
2806 assert ONOS4Port, "ONOS4Port not defined"
2807 assert ONOS5Port, "ONOS5Port not defined"
2808 assert ONOS6Port, "ONOS6Port not defined"
2809 assert ONOS7Port, "ONOS7Port not defined"
2810
2811 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2812 switch = main.params[ 'kill' ][ 'switch' ]
2813 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2814 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002815 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002816 description = "Adding a switch to ensure it is discovered correctly"
2817 main.case( description )
2818
2819 main.step( "Add back " + switch )
2820 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2821 for peer in links:
2822 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002823 ipList = [ node.ip_address for node in main.nodes ]
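        # Point the re-added switch at every ONOS node (not just the active ones) so
        # whichever controllers are up can connect to it and elect a master.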
Jon Hall5cf14d52015-07-16 12:15:19 -07002824 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2825 main.log.info( "Waiting " + str( switchSleep ) +
2826 " seconds for switch up to be discovered" )
2827 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002828 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002829 # Peek at the deleted switch
2830 main.log.warn( str( device ) )
2831 result = main.FALSE
2832 if device and device[ 'available' ]:
2833 result = main.TRUE
2834 utilities.assert_equals( expect=main.TRUE, actual=result,
2835 onpass="add switch successful",
2836 onfail="Failed to add switch?" )
2837
2838 def CASE13( self, main ):
2839 """
2840 Clean up
2841 """
2842 import os
2843 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002844 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002845 assert main, "main not defined"
2846 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002847 assert main.CLIs, "main.CLIs not defined"
2848 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002849
2850 # printing colors to terminal
2851 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2852 'blue': '\033[94m', 'green': '\033[92m',
2853 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2854 main.case( "Test Cleanup" )
2855 main.step( "Killing tcpdumps" )
2856 main.Mininet2.stopTcpdump()
2857
2858 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002859 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002860 main.step( "Copying MN pcap and ONOS log files to test station" )
2861 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2862 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002863 # NOTE: MN Pcap file is being saved to logdir.
2864 # We scp this file as MN and TestON aren't necessarily the same vm
2865
2866 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002867 # TODO: Load these from params
2868 # NOTE: must end in /
2869 logFolder = "/opt/onos/log/"
2870 logFiles = [ "karaf.log", "karaf.log.1" ]
2871 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002872 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002873 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002874 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002875 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2876 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002877 # std*.log's
2878 # NOTE: must end in /
2879 logFolder = "/opt/onos/var/"
2880 logFiles = [ "stderr.log", "stdout.log" ]
2881 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002882 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002883 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002884 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002885 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2886 logFolder + f, dstName )
2887 else:
2888 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002889
2890 main.step( "Stopping Mininet" )
2891 mnResult = main.Mininet1.stopNet()
2892 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2893 onpass="Mininet stopped",
2894 onfail="MN cleanup NOT successful" )
2895
2896 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002897 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002898 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2899 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002900
2901 try:
2902 timerLog = open( main.logdir + "/Timers.csv", 'w')
2903             # Overwrite the file with one header row and one data row, then close
2904 labels = "Gossip Intents, Restart"
2905 data = str( gossipTime ) + ", " + str( main.restartTime )
2906 timerLog.write( labels + "\n" + data )
2907 timerLog.close()
2908 except NameError, e:
2909 main.log.exception(e)
2910
2911 def CASE14( self, main ):
2912 """
2913 start election app on all onos nodes
2914 """
Jon Halle1a3b752015-07-22 13:02:46 -07002915 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002916 assert main, "main not defined"
2917 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002918 assert main.CLIs, "main.CLIs not defined"
2919 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002920
2921 main.case("Start Leadership Election app")
2922 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002923 onosCli = main.CLIs[ main.activeNodes[0] ]
2924 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002925 utilities.assert_equals(
2926 expect=main.TRUE,
2927 actual=appResult,
2928 onpass="Election app installed",
2929 onfail="Something went wrong with installing Leadership election" )
2930
2931 main.step( "Run for election on each node" )
2932 leaderResult = main.TRUE
2933 leaders = []
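        # electionTestRun registers each node as a candidate for the election topic;
        # electionTestLeader then asks each node who it currently sees as leader, so
        # every response should name the same ONOS node.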
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002934 for i in main.activeNodes:
2935 main.CLIs[i].electionTestRun()
2936 for i in main.activeNodes:
2937 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002938 leader = cli.electionTestLeader()
2939 if leader is None or leader == main.FALSE:
2940 main.log.error( cli.name + ": Leader for the election app " +
2941 "should be an ONOS node, instead got '" +
2942 str( leader ) + "'" )
2943 leaderResult = main.FALSE
2944 leaders.append( leader )
2945 utilities.assert_equals(
2946 expect=main.TRUE,
2947 actual=leaderResult,
2948 onpass="Successfully ran for leadership",
2949 onfail="Failed to run for leadership" )
2950
2951 main.step( "Check that each node shows the same leader" )
2952 sameLeader = main.TRUE
2953 if len( set( leaders ) ) != 1:
2954 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002955            main.log.error( "Results of electionTestLeader in order of main.CLIs: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07002956 str( leaders ) )
2957 utilities.assert_equals(
2958 expect=main.TRUE,
2959 actual=sameLeader,
2960 onpass="Leadership is consistent for the election topic",
2961 onfail="Nodes have different leaders" )
2962
2963 def CASE15( self, main ):
2964 """
2965 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002966 15.1 Run election on each node
2967 15.2 Check that each node has the same leaders and candidates
2968 15.3 Find current leader and withdraw
2969 15.4 Check that a new node was elected leader
2970 15.5 Check that that new leader was the candidate of old leader
2971 15.6 Run for election on old leader
2972 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2973 15.8 Make sure that the old leader was added to the candidate list
2974
2975 old and new variable prefixes refer to data from before vs after
2976        withdrawal, and later before withdrawal vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002977 """
2978 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002979 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002980 assert main, "main not defined"
2981 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002982 assert main.CLIs, "main.CLIs not defined"
2983 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002984
Jon Hall5cf14d52015-07-16 12:15:19 -07002985 description = "Check that Leadership Election is still functional"
2986 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002987        # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002988
Jon Halla440e872016-03-31 15:15:50 -07002989        oldLeaders = []  # list of lists of each node's candidates before
2990        newLeaders = []  # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002991 oldLeader = '' # the old leader from oldLeaders, None if not same
2992        newLeader = ''  # the new leader from newLeaders, None if not same
2993 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2994 expectNoLeader = False # True when there is only one leader
2995 if main.numCtrls == 1:
2996 expectNoLeader = True
2997
2998 main.step( "Run for election on each node" )
2999 electionResult = main.TRUE
3000
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003001 for i in main.activeNodes: # run test election on each node
3002 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07003003 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003004 utilities.assert_equals(
3005 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003006 actual=electionResult,
3007 onpass="All nodes successfully ran for leadership",
3008 onfail="At least one node failed to run for leadership" )
3009
acsmars3a72bde2015-09-02 14:16:22 -07003010 if electionResult == main.FALSE:
3011 main.log.error(
3012 "Skipping Test Case because Election Test App isn't loaded" )
3013 main.skipCase()
3014
acsmars71adceb2015-08-31 15:09:26 -07003015 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07003016 failMessage = "Nodes have different leaderboards"
3017 def consistentLeaderboards( nodes ):
3018 TOPIC = 'org.onosproject.election'
3019 # FIXME: use threads
3020 #FIXME: should we retry outside the function?
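            # Each leaderboard is assumed to be a list with the current leader first,
            # followed by the remaining candidates in queue order; the boards are
            # compared verbatim across nodes, retrying while an election may still
            # be settling.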
3021 for n in range( 5 ): # Retry in case election is still happening
3022 leaderList = []
3023 # Get all leaderboards
3024 for cli in nodes:
3025 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
3026 # Compare leaderboards
3027 result = all( i == leaderList[0] for i in leaderList ) and\
3028 leaderList is not None
3029 main.log.debug( leaderList )
3030 main.log.warn( result )
3031 if result:
3032 return ( result, leaderList )
3033                time.sleep(5)  # TODO: parameterize
3034            main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
            return ( result, leaderList )  # Always return a ( bool, list ) tuple, even when the boards never converge
3035 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
3036 sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
3037 if sameResult:
3038 oldLeader = oldLeaders[ 0 ][ 0 ]
3039 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07003040 else:
Jon Halla440e872016-03-31 15:15:50 -07003041 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07003042 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003043 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003044 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07003045 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07003046 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07003047
3048 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07003049 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07003050 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07003051 if oldLeader is None:
3052 main.log.error( "Leadership isn't consistent." )
3053 withdrawResult = main.FALSE
3054 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003055 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07003056 if oldLeader == main.nodes[ i ].ip_address:
3057 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003058 break
3059 else: # FOR/ELSE statement
3060 main.log.error( "Leader election, could not find current leader" )
3061 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07003062 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07003063 utilities.assert_equals(
3064 expect=main.TRUE,
3065 actual=withdrawResult,
3066 onpass="Node was withdrawn from election",
3067 onfail="Node was not withdrawn from election" )
3068
acsmars71adceb2015-08-31 15:09:26 -07003069 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07003070 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07003071 # Get new leaders and candidates
Jon Halla440e872016-03-31 15:15:50 -07003072 newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
3073 if newLeaders[ 0 ][ 0 ] == 'none':
3074 main.log.error( "No leader was elected on at least 1 node" )
3075 if not expectNoLeader:
3076 newLeaderResult = False
3077 if newLeaderResult:
3078 newLeader = newLeaders[ 0 ][ 0 ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003079 else:
Jon Halla440e872016-03-31 15:15:50 -07003080 newLeader = None
acsmars71adceb2015-08-31 15:09:26 -07003081
3082 # Check that the new leader is not the older leader, which was withdrawn
3083 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003084 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003085 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07003086 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003087 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003088 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003089 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003090 onpass="Leadership election passed",
3091 onfail="Something went wrong with Leadership election" )
3092
Jon Halla440e872016-03-31 15:15:50 -07003093        main.step( "Check that the new leader was a candidate of the old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003094        # candidates[ 2 ] should become the top candidate after withdrawal
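        # NOTE: index 0 is the old leader and index 1 is assumed to repeat it at the
        # head of the candidate queue, which is why index 2 is treated as next in line.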
acsmars71adceb2015-08-31 15:09:26 -07003095 correctCandidateResult = main.TRUE
3096 if expectNoLeader:
3097 if newLeader == 'none':
3098 main.log.info( "No leader expected. None found. Pass" )
3099 correctCandidateResult = main.TRUE
3100 else:
3101 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3102 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003103 elif len( oldLeaders[0] ) >= 3:
3104 if newLeader == oldLeaders[ 0 ][ 2 ]:
3105 # correct leader was elected
3106 correctCandidateResult = main.TRUE
3107 else:
3108 correctCandidateResult = main.FALSE
3109 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3110 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003111 else:
3112 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003113 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003114 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003115 utilities.assert_equals(
3116 expect=main.TRUE,
3117 actual=correctCandidateResult,
3118 onpass="Correct Candidate Elected",
3119 onfail="Incorrect Candidate Elected" )
3120
Jon Hall5cf14d52015-07-16 12:15:19 -07003121 main.step( "Run for election on old leader( just so everyone " +
3122 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003123 if oldLeaderCLI is not None:
3124 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003125 else:
acsmars71adceb2015-08-31 15:09:26 -07003126 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003127 runResult = main.FALSE
3128 utilities.assert_equals(
3129 expect=main.TRUE,
3130 actual=runResult,
3131 onpass="App re-ran for election",
3132 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003133
acsmars71adceb2015-08-31 15:09:26 -07003134 main.step(
3135 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003136 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003137 # Get new leaders and candidates
3138 reRunLeaders = []
3139        time.sleep( 5 )  # TODO: Parameterize
3140 positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003141
3142 # Check that the re-elected node is last on the candidate List
Jon Halla440e872016-03-31 15:15:50 -07003143 if oldLeader != reRunLeaders[ 0 ][ -1 ]:
3144            main.log.error( "Old Leader ({}) not in the proper position: {}".format( str( oldLeader ),
3145 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003146 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003147
3148 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003149 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003150 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003151 onpass="Old leader successfully re-ran for election",
3152 onfail="Something went wrong with Leadership election after " +
3153 "the old leader re-ran for election" )
3154
3155 def CASE16( self, main ):
3156 """
3157 Install Distributed Primitives app
3158 """
3159 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003160 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003161 assert main, "main not defined"
3162 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003163 assert main.CLIs, "main.CLIs not defined"
3164 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003165
3166 # Variables for the distributed primitives tests
3167 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003168 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003169 global onosSet
3170 global onosSetName
3171 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003172 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003173 onosSet = set([])
3174 onosSetName = "TestON-set"
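        # These globals hold the locally tracked "expected" state: pCounterValue mirrors
        # every increment applied to the distributed counter and onosSet mirrors every
        # mutation applied to the distributed set, so later cases can verify ONOS against them.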
3175
3176 description = "Install Primitives app"
3177 main.case( description )
3178 main.step( "Install Primitives app" )
3179 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003180 node = main.activeNodes[0]
3181 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003182 utilities.assert_equals( expect=main.TRUE,
3183 actual=appResults,
3184 onpass="Primitives app activated",
3185 onfail="Primitives app not activated" )
3186 time.sleep( 5 ) # To allow all nodes to activate
3187
3188 def CASE17( self, main ):
3189 """
3190 Check for basic functionality with distributed primitives
3191 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003192 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003193 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003194 assert main, "main not defined"
3195 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003196 assert main.CLIs, "main.CLIs not defined"
3197 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003198 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003199 assert onosSetName, "onosSetName not defined"
3200 # NOTE: assert fails if value is 0/None/Empty/False
3201 try:
3202 pCounterValue
3203 except NameError:
3204 main.log.error( "pCounterValue not defined, setting to 0" )
3205 pCounterValue = 0
3206 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003207 onosSet
3208 except NameError:
3209 main.log.error( "onosSet not defined, setting to empty Set" )
3210 onosSet = set([])
3211 # Variables for the distributed primitives tests. These are local only
3212 addValue = "a"
3213 addAllValue = "a b c d e f"
3214 retainValue = "c d e f"
3215
3216 description = "Check for basic functionality with distributed " +\
3217 "primitives"
3218 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003219 main.caseExplanation = "Test the methods of the distributed " +\
3220 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003221 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003222 # Partitioned counters
3223 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003224 pCounters = []
3225 threads = []
3226 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003227 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003228 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3229 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003230 args=[ pCounterName ] )
3231 pCounterValue += 1
3232 addedPValues.append( pCounterValue )
3233 threads.append( t )
3234 t.start()
3235
3236 for t in threads:
3237 t.join()
3238 pCounters.append( t.result )
3239 # Check that counter incremented numController times
3240 pCounterResults = True
3241 for i in addedPValues:
3242 tmpResult = i in pCounters
3243 pCounterResults = pCounterResults and tmpResult
3244 if not tmpResult:
3245 main.log.error( str( i ) + " is not in partitioned "
3246 "counter incremented results" )
3247 utilities.assert_equals( expect=True,
3248 actual=pCounterResults,
3249 onpass="Default counter incremented",
3250 onfail="Error incrementing default" +
3251 " counter" )
3252
Jon Halle1a3b752015-07-22 13:02:46 -07003253 main.step( "Get then Increment a default counter on each node" )
3254 pCounters = []
3255 threads = []
3256 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003257 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003258 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3259 name="counterGetAndAdd-" + str( i ),
3260 args=[ pCounterName ] )
3261 addedPValues.append( pCounterValue )
3262 pCounterValue += 1
3263 threads.append( t )
3264 t.start()
3265
3266 for t in threads:
3267 t.join()
3268 pCounters.append( t.result )
3269 # Check that counter incremented numController times
3270 pCounterResults = True
3271 for i in addedPValues:
3272 tmpResult = i in pCounters
3273 pCounterResults = pCounterResults and tmpResult
3274 if not tmpResult:
3275 main.log.error( str( i ) + " is not in partitioned "
3276 "counter incremented results" )
3277 utilities.assert_equals( expect=True,
3278 actual=pCounterResults,
3279 onpass="Default counter incremented",
3280 onfail="Error incrementing default" +
3281 " counter" )
3282
3283 main.step( "Counters we added have the correct values" )
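        # main.Counters.counterCheck is assumed to read the counter back on each node and
        # compare it to the locally tracked pCounterValue, returning main.TRUE on a match.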
3284 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3285 utilities.assert_equals( expect=main.TRUE,
3286 actual=incrementCheck,
3287 onpass="Added counters are correct",
3288 onfail="Added counters are incorrect" )
3289
3290 main.step( "Add -8 to then get a default counter on each node" )
3291 pCounters = []
3292 threads = []
3293 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003294 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003295 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3296 name="counterIncrement-" + str( i ),
3297 args=[ pCounterName ],
3298 kwargs={ "delta": -8 } )
3299 pCounterValue += -8
3300 addedPValues.append( pCounterValue )
3301 threads.append( t )
3302 t.start()
3303
3304 for t in threads:
3305 t.join()
3306 pCounters.append( t.result )
3307 # Check that counter incremented numController times
3308 pCounterResults = True
3309 for i in addedPValues:
3310 tmpResult = i in pCounters
3311 pCounterResults = pCounterResults and tmpResult
3312 if not tmpResult:
3313 main.log.error( str( i ) + " is not in partitioned "
3314 "counter incremented results" )
3315 utilities.assert_equals( expect=True,
3316 actual=pCounterResults,
3317 onpass="Default counter incremented",
3318 onfail="Error incrementing default" +
3319 " counter" )
3320
3321 main.step( "Add 5 to then get a default counter on each node" )
3322 pCounters = []
3323 threads = []
3324 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003325 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003326 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3327 name="counterIncrement-" + str( i ),
3328 args=[ pCounterName ],
3329 kwargs={ "delta": 5 } )
3330 pCounterValue += 5
3331 addedPValues.append( pCounterValue )
3332 threads.append( t )
3333 t.start()
3334
3335 for t in threads:
3336 t.join()
3337 pCounters.append( t.result )
3338 # Check that counter incremented numController times
3339 pCounterResults = True
3340 for i in addedPValues:
3341 tmpResult = i in pCounters
3342 pCounterResults = pCounterResults and tmpResult
3343 if not tmpResult:
3344 main.log.error( str( i ) + " is not in partitioned "
3345 "counter incremented results" )
3346 utilities.assert_equals( expect=True,
3347 actual=pCounterResults,
3348 onpass="Default counter incremented",
3349 onfail="Error incrementing default" +
3350 " counter" )
3351
3352 main.step( "Get then add 5 to a default counter on each node" )
3353 pCounters = []
3354 threads = []
3355 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003356 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003357 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3358 name="counterIncrement-" + str( i ),
3359 args=[ pCounterName ],
3360 kwargs={ "delta": 5 } )
3361 addedPValues.append( pCounterValue )
3362 pCounterValue += 5
3363 threads.append( t )
3364 t.start()
3365
3366 for t in threads:
3367 t.join()
3368 pCounters.append( t.result )
3369 # Check that counter incremented numController times
3370 pCounterResults = True
3371 for i in addedPValues:
3372 tmpResult = i in pCounters
3373 pCounterResults = pCounterResults and tmpResult
3374 if not tmpResult:
3375 main.log.error( str( i ) + " is not in partitioned "
3376 "counter incremented results" )
3377 utilities.assert_equals( expect=True,
3378 actual=pCounterResults,
3379 onpass="Default counter incremented",
3380 onfail="Error incrementing default" +
3381 " counter" )
3382
3383 main.step( "Counters we added have the correct values" )
3384 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3385 utilities.assert_equals( expect=main.TRUE,
3386 actual=incrementCheck,
3387 onpass="Added counters are correct",
3388 onfail="Added counters are incorrect" )
3389
Jon Hall5cf14d52015-07-16 12:15:19 -07003390 # DISTRIBUTED SETS
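        # Every set mutation below is applied to the local Python set first and then to the
        # ONOS set on each active node; the follow-up get/size checks compare the responses
        # against the local copy to catch divergence between nodes.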
3391 main.step( "Distributed Set get" )
3392 size = len( onosSet )
3393 getResponses = []
3394 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003395 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003396 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003397 name="setTestGet-" + str( i ),
3398 args=[ onosSetName ] )
3399 threads.append( t )
3400 t.start()
3401 for t in threads:
3402 t.join()
3403 getResponses.append( t.result )
3404
3405 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003406 for i in range( len( main.activeNodes ) ):
3407 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003408 if isinstance( getResponses[ i ], list):
3409 current = set( getResponses[ i ] )
3410 if len( current ) == len( getResponses[ i ] ):
3411 # no repeats
3412 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003413 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003414 " has incorrect view" +
3415 " of set " + onosSetName + ":\n" +
3416 str( getResponses[ i ] ) )
3417 main.log.debug( "Expected: " + str( onosSet ) )
3418 main.log.debug( "Actual: " + str( current ) )
3419 getResults = main.FALSE
3420 else:
3421 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003422 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003423 " has repeat elements in" +
3424 " set " + onosSetName + ":\n" +
3425 str( getResponses[ i ] ) )
3426 getResults = main.FALSE
3427 elif getResponses[ i ] == main.ERROR:
3428 getResults = main.FALSE
3429 utilities.assert_equals( expect=main.TRUE,
3430 actual=getResults,
3431 onpass="Set elements are correct",
3432 onfail="Set elements are incorrect" )
3433
3434 main.step( "Distributed Set size" )
3435 sizeResponses = []
3436 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003437 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003438 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003439 name="setTestSize-" + str( i ),
3440 args=[ onosSetName ] )
3441 threads.append( t )
3442 t.start()
3443 for t in threads:
3444 t.join()
3445 sizeResponses.append( t.result )
3446
3447 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003448 for i in range( len( main.activeNodes ) ):
3449 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003450 if size != sizeResponses[ i ]:
3451 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003452 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003453 " expected a size of " + str( size ) +
3454 " for set " + onosSetName +
3455 " but got " + str( sizeResponses[ i ] ) )
3456 utilities.assert_equals( expect=main.TRUE,
3457 actual=sizeResults,
3458 onpass="Set sizes are correct",
3459 onfail="Set sizes are incorrect" )
3460
3461 main.step( "Distributed Set add()" )
3462 onosSet.add( addValue )
3463 addResponses = []
3464 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003465 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003466 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003467 name="setTestAdd-" + str( i ),
3468 args=[ onosSetName, addValue ] )
3469 threads.append( t )
3470 t.start()
3471 for t in threads:
3472 t.join()
3473 addResponses.append( t.result )
3474
3475 # main.TRUE = successfully changed the set
3476 # main.FALSE = action resulted in no change in set
3477 # main.ERROR - Some error in executing the function
3478 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003479 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003480 if addResponses[ i ] == main.TRUE:
3481 # All is well
3482 pass
3483 elif addResponses[ i ] == main.FALSE:
3484 # Already in set, probably fine
3485 pass
3486 elif addResponses[ i ] == main.ERROR:
3487 # Error in execution
3488 addResults = main.FALSE
3489 else:
3490 # unexpected result
3491 addResults = main.FALSE
3492 if addResults != main.TRUE:
3493 main.log.error( "Error executing set add" )
3494
3495 # Check if set is still correct
3496 size = len( onosSet )
3497 getResponses = []
3498 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003499 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003500 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003501 name="setTestGet-" + str( i ),
3502 args=[ onosSetName ] )
3503 threads.append( t )
3504 t.start()
3505 for t in threads:
3506 t.join()
3507 getResponses.append( t.result )
3508 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003509 for i in range( len( main.activeNodes ) ):
3510 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003511 if isinstance( getResponses[ i ], list):
3512 current = set( getResponses[ i ] )
3513 if len( current ) == len( getResponses[ i ] ):
3514 # no repeats
3515 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003516 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003517 " of set " + onosSetName + ":\n" +
3518 str( getResponses[ i ] ) )
3519 main.log.debug( "Expected: " + str( onosSet ) )
3520 main.log.debug( "Actual: " + str( current ) )
3521 getResults = main.FALSE
3522 else:
3523 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003524 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003525 " set " + onosSetName + ":\n" +
3526 str( getResponses[ i ] ) )
3527 getResults = main.FALSE
3528 elif getResponses[ i ] == main.ERROR:
3529 getResults = main.FALSE
3530 sizeResponses = []
3531 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003532 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003533 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003534 name="setTestSize-" + str( i ),
3535 args=[ onosSetName ] )
3536 threads.append( t )
3537 t.start()
3538 for t in threads:
3539 t.join()
3540 sizeResponses.append( t.result )
3541 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003542 for i in range( len( main.activeNodes ) ):
3543 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003544 if size != sizeResponses[ i ]:
3545 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003546 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003547 " expected a size of " + str( size ) +
3548 " for set " + onosSetName +
3549 " but got " + str( sizeResponses[ i ] ) )
3550 addResults = addResults and getResults and sizeResults
3551 utilities.assert_equals( expect=main.TRUE,
3552 actual=addResults,
3553 onpass="Set add correct",
3554 onfail="Set add was incorrect" )
3555
3556 main.step( "Distributed Set addAll()" )
3557 onosSet.update( addAllValue.split() )
3558 addResponses = []
3559 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003560 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003561 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003562 name="setTestAddAll-" + str( i ),
3563 args=[ onosSetName, addAllValue ] )
3564 threads.append( t )
3565 t.start()
3566 for t in threads:
3567 t.join()
3568 addResponses.append( t.result )
3569
3570 # main.TRUE = successfully changed the set
3571 # main.FALSE = action resulted in no change in set
3572 # main.ERROR - Some error in executing the function
3573 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003574 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003575 if addResponses[ i ] == main.TRUE:
3576 # All is well
3577 pass
3578 elif addResponses[ i ] == main.FALSE:
3579 # Already in set, probably fine
3580 pass
3581 elif addResponses[ i ] == main.ERROR:
3582 # Error in execution
3583 addAllResults = main.FALSE
3584 else:
3585 # unexpected result
3586 addAllResults = main.FALSE
3587 if addAllResults != main.TRUE:
3588 main.log.error( "Error executing set addAll" )
3589
3590 # Check if set is still correct
3591 size = len( onosSet )
3592 getResponses = []
3593 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003594 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003595 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003596 name="setTestGet-" + str( i ),
3597 args=[ onosSetName ] )
3598 threads.append( t )
3599 t.start()
3600 for t in threads:
3601 t.join()
3602 getResponses.append( t.result )
3603 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003604 for i in range( len( main.activeNodes ) ):
3605 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003606 if isinstance( getResponses[ i ], list):
3607 current = set( getResponses[ i ] )
3608 if len( current ) == len( getResponses[ i ] ):
3609 # no repeats
3610 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003611 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003612 " has incorrect view" +
3613 " of set " + onosSetName + ":\n" +
3614 str( getResponses[ i ] ) )
3615 main.log.debug( "Expected: " + str( onosSet ) )
3616 main.log.debug( "Actual: " + str( current ) )
3617 getResults = main.FALSE
3618 else:
3619 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003620 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003621 " has repeat elements in" +
3622 " set " + onosSetName + ":\n" +
3623 str( getResponses[ i ] ) )
3624 getResults = main.FALSE
3625 elif getResponses[ i ] == main.ERROR:
3626 getResults = main.FALSE
3627 sizeResponses = []
3628 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003629 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003630 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003631 name="setTestSize-" + str( i ),
3632 args=[ onosSetName ] )
3633 threads.append( t )
3634 t.start()
3635 for t in threads:
3636 t.join()
3637 sizeResponses.append( t.result )
3638 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003639 for i in range( len( main.activeNodes ) ):
3640 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003641 if size != sizeResponses[ i ]:
3642 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003643 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003644 " expected a size of " + str( size ) +
3645 " for set " + onosSetName +
3646 " but got " + str( sizeResponses[ i ] ) )
3647 addAllResults = addAllResults and getResults and sizeResults
3648 utilities.assert_equals( expect=main.TRUE,
3649 actual=addAllResults,
3650 onpass="Set addAll correct",
3651 onfail="Set addAll was incorrect" )
3652
3653 main.step( "Distributed Set contains()" )
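        # When setTestGet is called with a "values" kwarg it appears to return a tuple whose
        # second element is the membership result, which is why element [ 1 ] is used below.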
3654 containsResponses = []
3655 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003656 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003657 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003658 name="setContains-" + str( i ),
3659 args=[ onosSetName ],
3660 kwargs={ "values": addValue } )
3661 threads.append( t )
3662 t.start()
3663 for t in threads:
3664 t.join()
3665 # NOTE: This is the tuple
3666 containsResponses.append( t.result )
3667
3668 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003669 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003670 if containsResponses[ i ] == main.ERROR:
3671 containsResults = main.FALSE
3672 else:
3673 containsResults = containsResults and\
3674 containsResponses[ i ][ 1 ]
3675 utilities.assert_equals( expect=main.TRUE,
3676 actual=containsResults,
3677 onpass="Set contains is functional",
3678 onfail="Set contains failed" )
3679
3680 main.step( "Distributed Set containsAll()" )
3681 containsAllResponses = []
3682 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003683 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003684 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003685 name="setContainsAll-" + str( i ),
3686 args=[ onosSetName ],
3687 kwargs={ "values": addAllValue } )
3688 threads.append( t )
3689 t.start()
3690 for t in threads:
3691 t.join()
3692 # NOTE: This is the tuple
3693 containsAllResponses.append( t.result )
3694
3695 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003696 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003697            if containsAllResponses[ i ] == main.ERROR:
3698                containsAllResults = main.FALSE
3699            else:
3700                containsAllResults = containsAllResults and\
3701                                     containsAllResponses[ i ][ 1 ]
3702 utilities.assert_equals( expect=main.TRUE,
3703 actual=containsAllResults,
3704 onpass="Set containsAll is functional",
3705 onfail="Set containsAll failed" )
3706
3707 main.step( "Distributed Set remove()" )
3708 onosSet.remove( addValue )
3709 removeResponses = []
3710 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003711 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003712 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003713 name="setTestRemove-" + str( i ),
3714 args=[ onosSetName, addValue ] )
3715 threads.append( t )
3716 t.start()
3717 for t in threads:
3718 t.join()
3719 removeResponses.append( t.result )
3720
3721 # main.TRUE = successfully changed the set
3722 # main.FALSE = action resulted in no change in set
3723 # main.ERROR - Some error in executing the function
3724 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003725 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003726 if removeResponses[ i ] == main.TRUE:
3727 # All is well
3728 pass
3729 elif removeResponses[ i ] == main.FALSE:
3730 # not in set, probably fine
3731 pass
3732 elif removeResponses[ i ] == main.ERROR:
3733 # Error in execution
3734 removeResults = main.FALSE
3735 else:
3736 # unexpected result
3737 removeResults = main.FALSE
3738 if removeResults != main.TRUE:
3739 main.log.error( "Error executing set remove" )
3740
3741 # Check if set is still correct
3742 size = len( onosSet )
3743 getResponses = []
3744 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003745 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003746 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003747 name="setTestGet-" + str( i ),
3748 args=[ onosSetName ] )
3749 threads.append( t )
3750 t.start()
3751 for t in threads:
3752 t.join()
3753 getResponses.append( t.result )
3754 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003755 for i in range( len( main.activeNodes ) ):
3756 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003757 if isinstance( getResponses[ i ], list):
3758 current = set( getResponses[ i ] )
3759 if len( current ) == len( getResponses[ i ] ):
3760 # no repeats
3761 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003762 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003763 " has incorrect view" +
3764 " of set " + onosSetName + ":\n" +
3765 str( getResponses[ i ] ) )
3766 main.log.debug( "Expected: " + str( onosSet ) )
3767 main.log.debug( "Actual: " + str( current ) )
3768 getResults = main.FALSE
3769 else:
3770 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003771 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003772 " has repeat elements in" +
3773 " set " + onosSetName + ":\n" +
3774 str( getResponses[ i ] ) )
3775 getResults = main.FALSE
3776 elif getResponses[ i ] == main.ERROR:
3777 getResults = main.FALSE
3778 sizeResponses = []
3779 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003780 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003781 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003782 name="setTestSize-" + str( i ),
3783 args=[ onosSetName ] )
3784 threads.append( t )
3785 t.start()
3786 for t in threads:
3787 t.join()
3788 sizeResponses.append( t.result )
3789 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003790 for i in range( len( main.activeNodes ) ):
3791 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003792 if size != sizeResponses[ i ]:
3793 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003794 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003795 " expected a size of " + str( size ) +
3796 " for set " + onosSetName +
3797 " but got " + str( sizeResponses[ i ] ) )
3798 removeResults = removeResults and getResults and sizeResults
3799 utilities.assert_equals( expect=main.TRUE,
3800 actual=removeResults,
3801 onpass="Set remove correct",
3802 onfail="Set remove was incorrect" )
3803
3804 main.step( "Distributed Set removeAll()" )
3805 onosSet.difference_update( addAllValue.split() )
3806 removeAllResponses = []
3807 threads = []
3808 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003809 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003810 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003811 name="setTestRemoveAll-" + str( i ),
3812 args=[ onosSetName, addAllValue ] )
3813 threads.append( t )
3814 t.start()
3815 for t in threads:
3816 t.join()
3817 removeAllResponses.append( t.result )
3818 except Exception, e:
3819 main.log.exception(e)
3820
3821 # main.TRUE = successfully changed the set
3822 # main.FALSE = action resulted in no change in set
3823 # main.ERROR - Some error in executing the function
3824 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003825 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003826 if removeAllResponses[ i ] == main.TRUE:
3827 # All is well
3828 pass
3829 elif removeAllResponses[ i ] == main.FALSE:
3830 # not in set, probably fine
3831 pass
3832 elif removeAllResponses[ i ] == main.ERROR:
3833 # Error in execution
3834 removeAllResults = main.FALSE
3835 else:
3836 # unexpected result
3837 removeAllResults = main.FALSE
3838 if removeAllResults != main.TRUE:
3839 main.log.error( "Error executing set removeAll" )
3840
3841 # Check if set is still correct
3842 size = len( onosSet )
3843 getResponses = []
3844 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003845 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003846 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003847 name="setTestGet-" + str( i ),
3848 args=[ onosSetName ] )
3849 threads.append( t )
3850 t.start()
3851 for t in threads:
3852 t.join()
3853 getResponses.append( t.result )
3854 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003855 for i in range( len( main.activeNodes ) ):
3856 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003857 if isinstance( getResponses[ i ], list):
3858 current = set( getResponses[ i ] )
3859 if len( current ) == len( getResponses[ i ] ):
3860 # no repeats
3861 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003862 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003863 " has incorrect view" +
3864 " of set " + onosSetName + ":\n" +
3865 str( getResponses[ i ] ) )
3866 main.log.debug( "Expected: " + str( onosSet ) )
3867 main.log.debug( "Actual: " + str( current ) )
3868 getResults = main.FALSE
3869 else:
3870 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003871 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003872 " has repeat elements in" +
3873 " set " + onosSetName + ":\n" +
3874 str( getResponses[ i ] ) )
3875 getResults = main.FALSE
3876 elif getResponses[ i ] == main.ERROR:
3877 getResults = main.FALSE
3878 sizeResponses = []
3879 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003880 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003881 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003882 name="setTestSize-" + str( i ),
3883 args=[ onosSetName ] )
3884 threads.append( t )
3885 t.start()
3886 for t in threads:
3887 t.join()
3888 sizeResponses.append( t.result )
3889 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003890 for i in range( len( main.activeNodes ) ):
3891 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003892 if size != sizeResponses[ i ]:
3893 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003894 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003895 " expected a size of " + str( size ) +
3896 " for set " + onosSetName +
3897 " but got " + str( sizeResponses[ i ] ) )
3898 removeAllResults = removeAllResults and getResults and sizeResults
3899 utilities.assert_equals( expect=main.TRUE,
3900 actual=removeAllResults,
3901 onpass="Set removeAll correct",
3902 onfail="Set removeAll was incorrect" )
3903
3904 main.step( "Distributed Set addAll()" )
3905 onosSet.update( addAllValue.split() )
3906 addResponses = []
3907 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003908 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003909 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003910 name="setTestAddAll-" + str( i ),
3911 args=[ onosSetName, addAllValue ] )
3912 threads.append( t )
3913 t.start()
3914 for t in threads:
3915 t.join()
3916 addResponses.append( t.result )
3917
3918 # main.TRUE = successfully changed the set
3919 # main.FALSE = action resulted in no change in set
3920 # main.ERROR - Some error in executing the function
3921 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003922 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003923 if addResponses[ i ] == main.TRUE:
3924 # All is well
3925 pass
3926 elif addResponses[ i ] == main.FALSE:
3927 # Already in set, probably fine
3928 pass
3929 elif addResponses[ i ] == main.ERROR:
3930 # Error in execution
3931 addAllResults = main.FALSE
3932 else:
3933 # unexpected result
3934 addAllResults = main.FALSE
3935 if addAllResults != main.TRUE:
3936 main.log.error( "Error executing set addAll" )
3937
3938 # Check if set is still correct
3939 size = len( onosSet )
3940 getResponses = []
3941 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003942 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003943 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003944 name="setTestGet-" + str( i ),
3945 args=[ onosSetName ] )
3946 threads.append( t )
3947 t.start()
3948 for t in threads:
3949 t.join()
3950 getResponses.append( t.result )
3951 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003952 for i in range( len( main.activeNodes ) ):
3953 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003954 if isinstance( getResponses[ i ], list):
3955 current = set( getResponses[ i ] )
3956 if len( current ) == len( getResponses[ i ] ):
3957 # no repeats
3958 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003959 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003960 " has incorrect view" +
3961 " of set " + onosSetName + ":\n" +
3962 str( getResponses[ i ] ) )
3963 main.log.debug( "Expected: " + str( onosSet ) )
3964 main.log.debug( "Actual: " + str( current ) )
3965 getResults = main.FALSE
3966 else:
3967 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003968 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003969 " has repeat elements in" +
3970 " set " + onosSetName + ":\n" +
3971 str( getResponses[ i ] ) )
3972 getResults = main.FALSE
3973 elif getResponses[ i ] == main.ERROR:
3974 getResults = main.FALSE
3975 sizeResponses = []
3976 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003977 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003978 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003979 name="setTestSize-" + str( i ),
3980 args=[ onosSetName ] )
3981 threads.append( t )
3982 t.start()
3983 for t in threads:
3984 t.join()
3985 sizeResponses.append( t.result )
3986 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003987 for i in range( len( main.activeNodes ) ):
3988 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003989 if size != sizeResponses[ i ]:
3990 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003991 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003992 " expected a size of " + str( size ) +
3993 " for set " + onosSetName +
3994 " but got " + str( sizeResponses[ i ] ) )
3995 addAllResults = addAllResults and getResults and sizeResults
3996 utilities.assert_equals( expect=main.TRUE,
3997 actual=addAllResults,
3998 onpass="Set addAll correct",
3999 onfail="Set addAll was incorrect" )
4000
4001 main.step( "Distributed Set clear()" )
4002 onosSet.clear()
4003 clearResponses = []
4004 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004005 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004006 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004007 name="setTestClear-" + str( i ),
4008                             args=[ onosSetName, " " ],  # Value doesn't matter when clearing
4009 kwargs={ "clear": True } )
4010 threads.append( t )
4011 t.start()
4012 for t in threads:
4013 t.join()
4014 clearResponses.append( t.result )
4015
4016 # main.TRUE = successfully changed the set
4017 # main.FALSE = action resulted in no change in set
4018 # main.ERROR - Some error in executing the function
4019 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004020 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004021 if clearResponses[ i ] == main.TRUE:
4022 # All is well
4023 pass
4024 elif clearResponses[ i ] == main.FALSE:
4025 # Nothing set, probably fine
4026 pass
4027 elif clearResponses[ i ] == main.ERROR:
4028 # Error in execution
4029 clearResults = main.FALSE
4030 else:
4031 # unexpected result
4032 clearResults = main.FALSE
4033 if clearResults != main.TRUE:
4034 main.log.error( "Error executing set clear" )
4035
4036 # Check if set is still correct
4037 size = len( onosSet )
4038 getResponses = []
4039 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004040 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004041 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004042 name="setTestGet-" + str( i ),
4043 args=[ onosSetName ] )
4044 threads.append( t )
4045 t.start()
4046 for t in threads:
4047 t.join()
4048 getResponses.append( t.result )
4049 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004050 for i in range( len( main.activeNodes ) ):
4051 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004052 if isinstance( getResponses[ i ], list):
4053 current = set( getResponses[ i ] )
4054 if len( current ) == len( getResponses[ i ] ):
4055 # no repeats
4056 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004057 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004058 " has incorrect view" +
4059 " of set " + onosSetName + ":\n" +
4060 str( getResponses[ i ] ) )
4061 main.log.debug( "Expected: " + str( onosSet ) )
4062 main.log.debug( "Actual: " + str( current ) )
4063 getResults = main.FALSE
4064 else:
4065 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004066 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004067 " has repeat elements in" +
4068 " set " + onosSetName + ":\n" +
4069 str( getResponses[ i ] ) )
4070 getResults = main.FALSE
4071 elif getResponses[ i ] == main.ERROR:
4072 getResults = main.FALSE
4073 sizeResponses = []
4074 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004075 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004076 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004077 name="setTestSize-" + str( i ),
4078 args=[ onosSetName ] )
4079 threads.append( t )
4080 t.start()
4081 for t in threads:
4082 t.join()
4083 sizeResponses.append( t.result )
4084 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004085 for i in range( len( main.activeNodes ) ):
4086 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004087 if size != sizeResponses[ i ]:
4088 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004089 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004090 " expected a size of " + str( size ) +
4091 " for set " + onosSetName +
4092 " but got " + str( sizeResponses[ i ] ) )
4093 clearResults = clearResults and getResults and sizeResults
4094 utilities.assert_equals( expect=main.TRUE,
4095 actual=clearResults,
4096 onpass="Set clear correct",
4097 onfail="Set clear was incorrect" )
4098
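        # Re-populate the (now empty) distributed set so the retain() step
        # below has elements to filter; this repeats the earlier addAll()
        # pattern with the same addAllValue elements.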
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

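        # retain() keeps only the elements that also appear in retainValue,
        # i.e. it is the distributed counterpart of Python's
        # set.intersection_update(), which is exactly how the local mirror is
        # updated below. Illustrative local-only example (not executed here):
        #     s = set( [ "a", "b", "c" ] )
        #     s.intersection_update( [ "b", "c", "d" ] )  # s is now {"b", "c"}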
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Nothing was removed by retain, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

        # Transactional maps
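        # One node ( the first active one ) writes numKeys entries, all mapped
        # to tMapValue; judging by the get step below, the keys are named
        # "Key1" .. "Key<numKeys>". The put responses are checked for the
        # expected value before every node reads the keys back.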
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

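        # Each key is read back on every active node and compared against
        # tMapValue; a single mismatch on any node fails the step. Reads are
        # issued one key at a time, so this loop performs roughly
        # numKeys * len( main.activeNodes ) CLI calls.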
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for response in getResponses:
                if response != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( str( getResponses ) )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )