"""
Description: This test is to determine if ONOS can handle
             a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAstopNodes:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               " cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
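        # NOTE: The loop below collects the CLI and node driver handles that
        #       TestON created from the .topo file ( presumably named
        #       ONOScli1..N and ONOS1..N ) and stops at the first missing
        #       component, so the cluster size tracks what the .topo defines.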
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
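        # NOTE: The generated cell file records which bench, Mininet and ONOS
        #       node IPs make up this cluster and which apps to preload; the
        #       exact contents are whatever the ONOSbench createCellFile()
        #       driver function writes out.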
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAstopNodes"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( "Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
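        # NOTE: Each node gets up to two passes of the isup() check ( which
        #       presumably polls the node until its startup log markers
        #       appear ) before the overall startup result is asserted.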
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for node in main.nodes:
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
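        # NOTE: 'apps' comes straight from the .params file and is split on
        #       commas below, so it is presumably a single comma-separated
        #       string of app names, e.g.
        #       "org.onosproject.openflow,org.onosproject.proxyarp"
        #       ( example names only ).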
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )
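        # NOTE: Every switch above is pointed at the full list of controller
        #       IPs; the loop below reads the controller list back with
        #       getSwController() ( 'ovs-vsctl' under the hood ) and checks
        #       that each ONOS IP appears as a tcp:<ip> target for every
        #       switch.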

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
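            # In a full 7 node cluster the mapping below works out to roughly:
            #   ONOS1: s1, s28    ONOS2: s2, s3    ONOS3: s5, s6    ONOS4: s4
            #   ONOS5: s8-s17     ONOS6: s7        ONOS7: s18-s27
            # With fewer nodes, the 'c % main.numCtrls' arithmetic wraps these
            # assignments onto the nodes that do exist.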
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts ( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %.2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        # Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
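        # NOTE: Hosts h8-h17 are paired with h18-h27; the MAC strings below
        #       assume the obelisk topology numbers host MACs so that hN ends
        #       in the zero-padded hex value of N.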
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
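        # NOTE: The allowed convergence time scales with cluster size: the
        #       gossip interval from the params file times the number of
        #       active nodes, which assumes each additional node adds at most
        #       one more gossip round before every store has the intents.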
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
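        # NOTE: Poll the intent states up to 40 times, roughly a second
        #       apart, and only move on to the ping check once every intent
        #       reports INSTALLED ( or the retry budget runs out ).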
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent partitions are in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        main.log.debug( onosCli.flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'
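        # NOTE: mastershipState ( and later intentState / flowState ) are
        #       module-level globals so the post-failure cases can compare
        #       the cluster state captured here against what survives the
        #       node stops.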

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...         ...         ...
            # ...         ...         ...
            title = " Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
1436 args=[],
1437 kwargs={ 'jsonFormat': True } )
1438 threads.append( t )
1439 t.start()
1440
1441 # NOTE: Flows command can take some time to run
1442         time.sleep( 30 )
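        # A possible alternative to the fixed sleep above (a sketch only, not
        # what this test does): poll each CLI until the flows call returns
        # output, reusing the existing retry helper, e.g.
        #     flowsOutput = utilities.retry( main.CLIs[ i ].flows, None,
        #                                    kwargs={ 'jsonFormat': True },
        #                                    sleep=5, attempts=6 )
        # The fixed sleep is kept here because the threads are already running
        # and only need time to finish.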
1443 for t in threads:
1444 t.join()
1445 result = t.result
1446 ONOSFlows.append( result )
1447
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001448 for i in range( len( ONOSFlows ) ):
1449 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001450 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1451 main.log.error( "Error in getting ONOS" + num + " flows" )
1452 main.log.warn( "ONOS" + num + " flows response: " +
1453 repr( ONOSFlows[ i ] ) )
1454 flowsResults = False
1455 ONOSFlowsJson.append( None )
1456 else:
1457 try:
1458 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1459 except ( ValueError, TypeError ):
1460 # FIXME: change this to log.error?
1461 main.log.exception( "Error in parsing ONOS" + num +
1462 " response as json." )
1463 main.log.error( repr( ONOSFlows[ i ] ) )
1464 ONOSFlowsJson.append( None )
1465 flowsResults = False
1466 utilities.assert_equals(
1467 expect=True,
1468 actual=flowsResults,
1469 onpass="No error in reading flows output",
1470 onfail="Error in reading flows from ONOS" )
1471
1472 main.step( "Check for consistency in Flows from each controller" )
1473 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1474 if all( tmp ):
1475 main.log.info( "Flow count is consistent across all ONOS nodes" )
1476 else:
1477 consistentFlows = False
1478 utilities.assert_equals(
1479 expect=True,
1480 actual=consistentFlows,
1481 onpass="The flow count is consistent across all ONOS nodes",
1482 onfail="ONOS nodes have different flow counts" )
1483
1484 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001485 for i in range( len( ONOSFlows ) ):
1486 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001487 try:
1488 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001489 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001490 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1491 indent=4, separators=( ',', ': ' ) ) )
1492 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001493 main.log.warn( "ONOS" + node + " flows: " +
1494 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001495 elif flowsResults and consistentFlows:
1496 flowCheck = main.TRUE
1497 flowState = ONOSFlows[ 0 ]
1498
1499 main.step( "Get the OF Table entries" )
1500 global flows
1501 flows = []
1502 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001503 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001504 if flowCheck == main.FALSE:
1505 for table in flows:
1506 main.log.warn( table )
1507 # TODO: Compare switch flow tables with ONOS flow tables
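        # One way the TODO above could be approached (hypothetical sketch, not
        # executed by this test): compare the per-switch flow counts reported
        # by Mininet with the number of flows ONOS reported for the network.
        #     switchFlowCounts = [ table.count( "cookie=" ) for table in flows ]
        #     onosFlowCount = len( ONOSFlowsJson[ 0 ] ) if ONOSFlowsJson[ 0 ] else 0
        #     main.log.debug( "Switch flow counts: " + str( switchFlowCounts ) +
        #                     "; ONOS flow count: " + str( onosFlowCount ) )
        # A real comparison would need to account for table-miss entries and
        # how flows are distributed across switches.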
1508
1509 main.step( "Start continuous pings" )
1510 main.Mininet2.pingLong(
1511 src=main.params[ 'PING' ][ 'source1' ],
1512 target=main.params[ 'PING' ][ 'target1' ],
1513 pingTime=500 )
1514 main.Mininet2.pingLong(
1515 src=main.params[ 'PING' ][ 'source2' ],
1516 target=main.params[ 'PING' ][ 'target2' ],
1517 pingTime=500 )
1518 main.Mininet2.pingLong(
1519 src=main.params[ 'PING' ][ 'source3' ],
1520 target=main.params[ 'PING' ][ 'target3' ],
1521 pingTime=500 )
1522 main.Mininet2.pingLong(
1523 src=main.params[ 'PING' ][ 'source4' ],
1524 target=main.params[ 'PING' ][ 'target4' ],
1525 pingTime=500 )
1526 main.Mininet2.pingLong(
1527 src=main.params[ 'PING' ][ 'source5' ],
1528 target=main.params[ 'PING' ][ 'target5' ],
1529 pingTime=500 )
1530 main.Mininet2.pingLong(
1531 src=main.params[ 'PING' ][ 'source6' ],
1532 target=main.params[ 'PING' ][ 'target6' ],
1533 pingTime=500 )
1534 main.Mininet2.pingLong(
1535 src=main.params[ 'PING' ][ 'source7' ],
1536 target=main.params[ 'PING' ][ 'target7' ],
1537 pingTime=500 )
1538 main.Mininet2.pingLong(
1539 src=main.params[ 'PING' ][ 'source8' ],
1540 target=main.params[ 'PING' ][ 'target8' ],
1541 pingTime=500 )
1542 main.Mininet2.pingLong(
1543 src=main.params[ 'PING' ][ 'source9' ],
1544 target=main.params[ 'PING' ][ 'target9' ],
1545 pingTime=500 )
1546 main.Mininet2.pingLong(
1547 src=main.params[ 'PING' ][ 'source10' ],
1548 target=main.params[ 'PING' ][ 'target10' ],
1549 pingTime=500 )
1550
1551 main.step( "Collecting topology information from ONOS" )
1552 devices = []
1553 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001554 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001555 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001556 name="devices-" + str( i ),
1557 args=[ ] )
1558 threads.append( t )
1559 t.start()
1560
1561 for t in threads:
1562 t.join()
1563 devices.append( t.result )
1564 hosts = []
1565 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001566 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001567 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001568 name="hosts-" + str( i ),
1569 args=[ ] )
1570 threads.append( t )
1571 t.start()
1572
1573 for t in threads:
1574 t.join()
1575 try:
1576 hosts.append( json.loads( t.result ) )
1577 except ( ValueError, TypeError ):
1578 # FIXME: better handling of this, print which node
1579 # Maybe use thread name?
1580 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001581 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001582 hosts.append( None )
1583
1584 ports = []
1585 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001586 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001587 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001588 name="ports-" + str( i ),
1589 args=[ ] )
1590 threads.append( t )
1591 t.start()
1592
1593 for t in threads:
1594 t.join()
1595 ports.append( t.result )
1596 links = []
1597 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001598 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001599 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001600 name="links-" + str( i ),
1601 args=[ ] )
1602 threads.append( t )
1603 t.start()
1604
1605 for t in threads:
1606 t.join()
1607 links.append( t.result )
1608 clusters = []
1609 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001610 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001611 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001612 name="clusters-" + str( i ),
1613 args=[ ] )
1614 threads.append( t )
1615 t.start()
1616
1617 for t in threads:
1618 t.join()
1619 clusters.append( t.result )
1620 # Compare json objects for hosts and dataplane clusters
1621
1622 # hosts
1623 main.step( "Host view is consistent across ONOS nodes" )
1624 consistentHostsResult = main.TRUE
1625 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001626 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001627 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001628 if hosts[ controller ] == hosts[ 0 ]:
1629 continue
1630 else: # hosts not consistent
1631 main.log.error( "hosts from ONOS" +
1632 controllerStr +
1633                                  " are inconsistent with ONOS1" )
1634 main.log.warn( repr( hosts[ controller ] ) )
1635 consistentHostsResult = main.FALSE
1636
1637 else:
1638 main.log.error( "Error in getting ONOS hosts from ONOS" +
1639 controllerStr )
1640 consistentHostsResult = main.FALSE
1641 main.log.warn( "ONOS" + controllerStr +
1642 " hosts response: " +
1643 repr( hosts[ controller ] ) )
1644 utilities.assert_equals(
1645 expect=main.TRUE,
1646 actual=consistentHostsResult,
1647 onpass="Hosts view is consistent across all ONOS nodes",
1648 onfail="ONOS nodes have different views of hosts" )
1649
1650 main.step( "Each host has an IP address" )
1651 ipResult = main.TRUE
1652 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001653 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001654 if hosts[ controller ]:
1655 for host in hosts[ controller ]:
1656 if not host.get( 'ipAddresses', [ ] ):
1657 main.log.error( "Error with host ips on controller" +
1658 controllerStr + ": " + str( host ) )
1659 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001660 utilities.assert_equals(
1661 expect=main.TRUE,
1662 actual=ipResult,
1663             onpass="The IPs of the hosts aren't empty",
1664             onfail="The IP of at least one host is missing" )
1665
1666 # Strongly connected clusters of devices
1667 main.step( "Cluster view is consistent across ONOS nodes" )
1668 consistentClustersResult = main.TRUE
1669 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001670 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001671 if "Error" not in clusters[ controller ]:
1672 if clusters[ controller ] == clusters[ 0 ]:
1673 continue
1674 else: # clusters not consistent
1675 main.log.error( "clusters from ONOS" + controllerStr +
1676                                  " are inconsistent with ONOS1" )
1677 consistentClustersResult = main.FALSE
1678
1679 else:
1680 main.log.error( "Error in getting dataplane clusters " +
1681 "from ONOS" + controllerStr )
1682 consistentClustersResult = main.FALSE
1683 main.log.warn( "ONOS" + controllerStr +
1684 " clusters response: " +
1685 repr( clusters[ controller ] ) )
1686 utilities.assert_equals(
1687 expect=main.TRUE,
1688 actual=consistentClustersResult,
1689 onpass="Clusters view is consistent across all ONOS nodes",
1690 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001691 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001692 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001693
Jon Hall5cf14d52015-07-16 12:15:19 -07001694 # there should always only be one cluster
1695 main.step( "Cluster view correct across ONOS nodes" )
1696 try:
1697 numClusters = len( json.loads( clusters[ 0 ] ) )
1698 except ( ValueError, TypeError ):
1699 main.log.exception( "Error parsing clusters[0]: " +
1700 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001701 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001702 clusterResults = main.FALSE
1703 if numClusters == 1:
1704 clusterResults = main.TRUE
1705 utilities.assert_equals(
1706 expect=1,
1707 actual=numClusters,
1708 onpass="ONOS shows 1 SCC",
1709 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1710
1711 main.step( "Comparing ONOS topology to MN" )
1712 devicesResults = main.TRUE
1713 linksResults = main.TRUE
1714 hostsResults = main.TRUE
1715 mnSwitches = main.Mininet1.getSwitches()
1716 mnLinks = main.Mininet1.getLinks()
1717 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001718         for controller in range( len( main.activeNodes ) ):
1719 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001720 if devices[ controller ] and ports[ controller ] and\
1721 "Error" not in devices[ controller ] and\
1722 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001723 currentDevicesResult = main.Mininet1.compareSwitches(
1724 mnSwitches,
1725 json.loads( devices[ controller ] ),
1726 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001727 else:
1728 currentDevicesResult = main.FALSE
1729 utilities.assert_equals( expect=main.TRUE,
1730 actual=currentDevicesResult,
1731 onpass="ONOS" + controllerStr +
1732 " Switches view is correct",
1733 onfail="ONOS" + controllerStr +
1734 " Switches view is incorrect" )
1735 if links[ controller ] and "Error" not in links[ controller ]:
1736 currentLinksResult = main.Mininet1.compareLinks(
1737 mnSwitches, mnLinks,
1738 json.loads( links[ controller ] ) )
1739 else:
1740 currentLinksResult = main.FALSE
1741 utilities.assert_equals( expect=main.TRUE,
1742 actual=currentLinksResult,
1743 onpass="ONOS" + controllerStr +
1744 " links view is correct",
1745 onfail="ONOS" + controllerStr +
1746 " links view is incorrect" )
1747
Jon Hall657cdf62015-12-17 14:40:51 -08001748 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001749 currentHostsResult = main.Mininet1.compareHosts(
1750 mnHosts,
1751 hosts[ controller ] )
1752 else:
1753 currentHostsResult = main.FALSE
1754 utilities.assert_equals( expect=main.TRUE,
1755 actual=currentHostsResult,
1756 onpass="ONOS" + controllerStr +
1757 " hosts exist in Mininet",
1758 onfail="ONOS" + controllerStr +
1759 " hosts don't match Mininet" )
1760
1761 devicesResults = devicesResults and currentDevicesResult
1762 linksResults = linksResults and currentLinksResult
1763 hostsResults = hostsResults and currentHostsResult
1764
1765 main.step( "Device information is correct" )
1766 utilities.assert_equals(
1767 expect=main.TRUE,
1768 actual=devicesResults,
1769 onpass="Device information is correct",
1770 onfail="Device information is incorrect" )
1771
1772 main.step( "Links are correct" )
1773 utilities.assert_equals(
1774 expect=main.TRUE,
1775 actual=linksResults,
1776             onpass="Links are correct",
1777 onfail="Links are incorrect" )
1778
1779 main.step( "Hosts are correct" )
1780 utilities.assert_equals(
1781 expect=main.TRUE,
1782 actual=hostsResults,
1783 onpass="Hosts are correct",
1784 onfail="Hosts are incorrect" )
1785
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001786 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001787 """
1788         The failure-inducing case: stop a minority of the ONOS nodes.
1789 """
Jon Halle1a3b752015-07-22 13:02:46 -07001790 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001791 assert main, "main not defined"
1792 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001793 assert main.CLIs, "main.CLIs not defined"
1794 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001795 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001796
1797 main.step( "Checking ONOS Logs for errors" )
1798 for node in main.nodes:
1799 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1800 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1801
Jon Hall3b489db2015-10-05 14:38:37 -07001802 n = len( main.nodes ) # Number of nodes
1803 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1804 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1805 if n > 3:
1806 main.kill.append( p - 1 )
1807 # NOTE: This only works for cluster sizes of 3,5, or 7.
1808
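        # For reference, a strict minority of an odd cluster is at most
        # ( n - 1 ) / 2 nodes; a hypothetical generalization (not used here)
        # could be:
        #     minority = ( n - 1 ) / 2
        #     main.kill = range( 0, minority )
        # The hard-coded indices above are kept because they stay within that
        # bound for n of 3, 5, or 7 and later cases assume this selection.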
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001809 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001810 killResults = main.TRUE
1811 for i in main.kill:
1812 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001813 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1814 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001815 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001816 onpass="ONOS nodes stopped successfully",
1817 onfail="ONOS nodes NOT successfully stopped" )
1818
Jon Halld2871c22016-07-26 11:01:14 -07001819 main.step( "Checking ONOS nodes" )
1820 nodeResults = utilities.retry( main.HA.nodesCheck,
1821 False,
1822 args=[main.activeNodes],
1823 sleep=15,
1824 attempts=5 )
1825
1826 utilities.assert_equals( expect=True, actual=nodeResults,
1827 onpass="Nodes check successful",
1828 onfail="Nodes check NOT successful" )
1829
1830 if not nodeResults:
1831 for i in main.activeNodes:
1832 cli = main.CLIs[i]
1833 main.log.debug( "{} components not ACTIVE: \n{}".format(
1834 cli.name,
1835 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1836 main.log.error( "Failed to start ONOS, stopping test" )
1837 main.cleanup()
1838 main.exit()
1839
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001840 def CASE62( self, main ):
1841 """
1842         The failure recovery case: bring the stopped ONOS nodes back up.
1843 """
1844 import time
1845 assert main.numCtrls, "main.numCtrls not defined"
1846 assert main, "main not defined"
1847 assert utilities.assert_equals, "utilities.assert_equals not defined"
1848 assert main.CLIs, "main.CLIs not defined"
1849 assert main.nodes, "main.nodes not defined"
1850 assert main.kill, "main.kill not defined"
1851 main.case( "Restart minority of ONOS nodes" )
1852
1853 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1854 startResults = main.TRUE
1855 restartTime = time.time()
1856 for i in main.kill:
1857 startResults = startResults and\
1858 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1859 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1860 onpass="ONOS nodes started successfully",
1861 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001862
1863 main.step( "Checking if ONOS is up yet" )
1864 count = 0
1865 onosIsupResult = main.FALSE
1866 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001867 onosIsupResult = main.TRUE
1868 for i in main.kill:
1869 onosIsupResult = onosIsupResult and\
1870 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001871 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001872 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1873 onpass="ONOS restarted successfully",
1874 onfail="ONOS restart NOT successful" )
1875
Jon Halle1a3b752015-07-22 13:02:46 -07001876 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001877 cliResults = main.TRUE
1878 for i in main.kill:
1879 cliResults = cliResults and\
1880 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001881 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001882 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1883 onpass="ONOS cli restarted",
1884 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001885 main.activeNodes.sort()
1886 try:
1887 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1888 "List of active nodes has duplicates, this likely indicates something was run out of order"
1889 except AssertionError:
1890 main.log.exception( "" )
1891 main.cleanup()
1892 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001893
1894         # Grab the time of restart so we can check how long the gossip
1895 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001896 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001897 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Halld2871c22016-07-26 11:01:14 -07001898
1899 main.step( "Checking ONOS nodes" )
1900 nodeResults = utilities.retry( main.HA.nodesCheck,
1901 False,
1902 args=[main.activeNodes],
1903 sleep=15,
1904 attempts=5 )
1905
1906 utilities.assert_equals( expect=True, actual=nodeResults,
1907 onpass="Nodes check successful",
1908 onfail="Nodes check NOT successful" )
1909
1910 if not nodeResults:
1911 for i in main.activeNodes:
1912 cli = main.CLIs[i]
1913 main.log.debug( "{} components not ACTIVE: \n{}".format(
1914 cli.name,
1915 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1916 main.log.error( "Failed to start ONOS, stopping test" )
1917 main.cleanup()
1918 main.exit()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001919 node = main.activeNodes[0]
1920 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1921 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1922 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001923
Jon Halla440e872016-03-31 15:15:50 -07001924 main.step( "Rerun for election on the node(s) that were killed" )
1925 runResults = main.TRUE
1926 for i in main.kill:
1927 runResults = runResults and\
1928 main.CLIs[i].electionTestRun()
1929 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1930 onpass="ONOS nodes reran for election topic",
1931                                  onfail="Error rerunning for election" )
1932
Jon Hall5cf14d52015-07-16 12:15:19 -07001933 def CASE7( self, main ):
1934 """
1935 Check state after ONOS failure
1936 """
1937 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001938 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001939 assert main, "main not defined"
1940 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001941 assert main.CLIs, "main.CLIs not defined"
1942 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001943 try:
1944 main.kill
1945 except AttributeError:
1946 main.kill = []
1947
Jon Hall5cf14d52015-07-16 12:15:19 -07001948 main.case( "Running ONOS Constant State Tests" )
1949
1950 main.step( "Check that each switch has a master" )
1951 # Assert that each device has a master
1952 rolesNotNull = main.TRUE
1953 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001954 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001955 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001956 name="rolesNotNull-" + str( i ),
1957 args=[ ] )
1958 threads.append( t )
1959 t.start()
1960
1961 for t in threads:
1962 t.join()
1963 rolesNotNull = rolesNotNull and t.result
1964 utilities.assert_equals(
1965 expect=main.TRUE,
1966 actual=rolesNotNull,
1967 onpass="Each device has a master",
1968 onfail="Some devices don't have a master assigned" )
1969
1970 main.step( "Read device roles from ONOS" )
1971 ONOSMastership = []
Jon Halla440e872016-03-31 15:15:50 -07001972 mastershipCheck = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001973 consistentMastership = True
1974 rolesResults = True
1975 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001976 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001977 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001978 name="roles-" + str( i ),
1979 args=[] )
1980 threads.append( t )
1981 t.start()
1982
1983 for t in threads:
1984 t.join()
1985 ONOSMastership.append( t.result )
1986
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001987 for i in range( len( ONOSMastership ) ):
1988 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001989 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001990 main.log.error( "Error in getting ONOS" + node + " roles" )
1991 main.log.warn( "ONOS" + node + " mastership response: " +
1992 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001993 rolesResults = False
1994 utilities.assert_equals(
1995 expect=True,
1996 actual=rolesResults,
1997 onpass="No error in reading roles output",
1998 onfail="Error in reading roles from ONOS" )
1999
2000 main.step( "Check for consistency in roles from each controller" )
2001 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
2002 main.log.info(
2003 "Switch roles are consistent across all ONOS nodes" )
2004 else:
2005 consistentMastership = False
2006 utilities.assert_equals(
2007 expect=True,
2008 actual=consistentMastership,
2009 onpass="Switch roles are consistent across all ONOS nodes",
2010 onfail="ONOS nodes have different views of switch roles" )
2011
2012 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002013 for i in range( len( ONOSMastership ) ):
2014 node = str( main.activeNodes[i] + 1 )
2015                 main.log.warn( "ONOS" + node + " roles: " +
2016 json.dumps( json.loads( ONOSMastership[ i ] ),
2017 sort_keys=True,
2018 indent=4,
2019 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002020
2021 # NOTE: we expect mastership to change on controller failure
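        # A sketch (not executed) of how that change could be made visible,
        # assuming mastershipState was saved by CASE5:
        #     try:
        #         if mastershipState == ONOSMastership[ 0 ]:
        #             main.log.info( "Mastership unchanged after failure" )
        #         else:
        #             main.log.info( "Mastership changed after failure" )
        #     except NameError:
        #         main.log.warn( "No saved mastership state to compare" )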
Jon Hall5cf14d52015-07-16 12:15:19 -07002022
2023 main.step( "Get the intents and compare across all nodes" )
2024 ONOSIntents = []
2025 intentCheck = main.FALSE
2026 consistentIntents = True
2027 intentsResults = True
2028 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002029 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002030 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07002031 name="intents-" + str( i ),
2032 args=[],
2033 kwargs={ 'jsonFormat': True } )
2034 threads.append( t )
2035 t.start()
2036
2037 for t in threads:
2038 t.join()
2039 ONOSIntents.append( t.result )
2040
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002041         for i in range( len( ONOSIntents ) ):
2042 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002043 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002044 main.log.error( "Error in getting ONOS" + node + " intents" )
2045 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07002046 repr( ONOSIntents[ i ] ) )
2047 intentsResults = False
2048 utilities.assert_equals(
2049 expect=True,
2050 actual=intentsResults,
2051 onpass="No error in reading intents output",
2052 onfail="Error in reading intents from ONOS" )
2053
2054 main.step( "Check for consistency in Intents from each controller" )
2055 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2056 main.log.info( "Intents are consistent across all ONOS " +
2057 "nodes" )
2058 else:
2059 consistentIntents = False
2060
2061 # Try to make it easy to figure out what is happening
2062 #
2063 # Intent ONOS1 ONOS2 ...
2064 # 0x01 INSTALLED INSTALLING
2065 # ... ... ...
2066 # ... ... ...
2067 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002068 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07002069 title += " " * 10 + "ONOS" + str( n + 1 )
2070 main.log.warn( title )
2071 # get all intent keys in the cluster
2072 keys = []
2073 for nodeStr in ONOSIntents:
2074 node = json.loads( nodeStr )
2075 for intent in node:
2076 keys.append( intent.get( 'id' ) )
2077 keys = set( keys )
2078 for key in keys:
2079 row = "%-13s" % key
2080 for nodeStr in ONOSIntents:
2081 node = json.loads( nodeStr )
2082 for intent in node:
2083 if intent.get( 'id' ) == key:
2084 row += "%-15s" % intent.get( 'state' )
2085 main.log.warn( row )
2086 # End table view
2087
2088 utilities.assert_equals(
2089 expect=True,
2090 actual=consistentIntents,
2091 onpass="Intents are consistent across all ONOS nodes",
2092 onfail="ONOS nodes have different views of intents" )
2093 intentStates = []
2094 for node in ONOSIntents: # Iter through ONOS nodes
2095 nodeStates = []
2096 # Iter through intents of a node
2097 try:
2098 for intent in json.loads( node ):
2099 nodeStates.append( intent[ 'state' ] )
2100 except ( ValueError, TypeError ):
2101 main.log.exception( "Error in parsing intents" )
2102 main.log.error( repr( node ) )
2103 intentStates.append( nodeStates )
2104 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2105 main.log.info( dict( out ) )
2106
2107 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002108 for i in range( len( main.activeNodes ) ):
2109 node = str( main.activeNodes[i] + 1 )
2110 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07002111 main.log.warn( json.dumps(
2112 json.loads( ONOSIntents[ i ] ),
2113 sort_keys=True,
2114 indent=4,
2115 separators=( ',', ': ' ) ) )
2116 elif intentsResults and consistentIntents:
2117 intentCheck = main.TRUE
2118
2119 # NOTE: Store has no durability, so intents are lost across system
2120 # restarts
2121 main.step( "Compare current intents with intents before the failure" )
2122 # NOTE: this requires case 5 to pass for intentState to be set.
2123 # maybe we should stop the test if that fails?
2124 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002125 try:
2126 intentState
2127 except NameError:
2128 main.log.warn( "No previous intent state was saved" )
2129 else:
2130 if intentState and intentState == ONOSIntents[ 0 ]:
2131 sameIntents = main.TRUE
2132 main.log.info( "Intents are consistent with before failure" )
2133 # TODO: possibly the states have changed? we may need to figure out
2134 # what the acceptable states are
2135 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2136 sameIntents = main.TRUE
2137 try:
2138 before = json.loads( intentState )
2139 after = json.loads( ONOSIntents[ 0 ] )
2140 for intent in before:
2141 if intent not in after:
2142 sameIntents = main.FALSE
2143 main.log.debug( "Intent is not currently in ONOS " +
2144 "(at least in the same form):" )
2145 main.log.debug( json.dumps( intent ) )
2146 except ( ValueError, TypeError ):
2147 main.log.exception( "Exception printing intents" )
2148 main.log.debug( repr( ONOSIntents[0] ) )
2149 main.log.debug( repr( intentState ) )
2150 if sameIntents == main.FALSE:
2151 try:
2152 main.log.debug( "ONOS intents before: " )
2153 main.log.debug( json.dumps( json.loads( intentState ),
2154 sort_keys=True, indent=4,
2155 separators=( ',', ': ' ) ) )
2156 main.log.debug( "Current ONOS intents: " )
2157 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2158 sort_keys=True, indent=4,
2159 separators=( ',', ': ' ) ) )
2160 except ( ValueError, TypeError ):
2161 main.log.exception( "Exception printing intents" )
2162 main.log.debug( repr( ONOSIntents[0] ) )
2163 main.log.debug( repr( intentState ) )
2164 utilities.assert_equals(
2165 expect=main.TRUE,
2166 actual=sameIntents,
2167 onpass="Intents are consistent with before failure",
2168 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002169 intentCheck = intentCheck and sameIntents
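        # A looser comparison (hypothetical, not used here) could ignore the
        # intent state, since states may legitimately differ after a failure
        # while the intents themselves survive:
        #     try:
        #         beforeIds = set( i.get( 'id' ) for i in json.loads( intentState ) )
        #         afterIds = set( i.get( 'id' ) for i in json.loads( ONOSIntents[ 0 ] ) )
        #         main.log.debug( "Intents lost: " + repr( beforeIds - afterIds ) )
        #     except ( NameError, ValueError, TypeError ):
        #         main.log.warn( "Could not compare intent ids" )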
2170
2171 main.step( "Get the OF Table entries and compare to before " +
2172 "component failure" )
2173 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002174 for i in range( 28 ):
2175 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002176 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002177 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2178 FlowTables = FlowTables and curSwitch
2179 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002180 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002181 utilities.assert_equals(
2182 expect=main.TRUE,
2183 actual=FlowTables,
2184 onpass="No changes were found in the flow tables",
2185 onfail="Changes were found in the flow tables" )
2186
2187 main.Mininet2.pingLongKill()
2188 '''
2189 main.step( "Check the continuous pings to ensure that no packets " +
2190 "were dropped during component failure" )
2191 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2192 main.params[ 'TESTONIP' ] )
2193 LossInPings = main.FALSE
2194 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2195 for i in range( 8, 18 ):
2196 main.log.info(
2197 "Checking for a loss in pings along flow from s" +
2198 str( i ) )
2199 LossInPings = main.Mininet2.checkForLoss(
2200 "/tmp/ping.h" +
2201 str( i ) ) or LossInPings
2202 if LossInPings == main.TRUE:
2203 main.log.info( "Loss in ping detected" )
2204 elif LossInPings == main.ERROR:
2205 main.log.info( "There are multiple mininet process running" )
2206 elif LossInPings == main.FALSE:
2207 main.log.info( "No Loss in the pings" )
2208 main.log.info( "No loss of dataplane connectivity" )
2209 utilities.assert_equals(
2210 expect=main.FALSE,
2211 actual=LossInPings,
2212 onpass="No Loss of connectivity",
2213 onfail="Loss of dataplane connectivity detected" )
2214 '''
2215
2216 main.step( "Leadership Election is still functional" )
2217 # Test of LeadershipElection
2218 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002219
Jon Hall3b489db2015-10-05 14:38:37 -07002220 restarted = []
2221 for i in main.kill:
2222 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002223 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002224
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002225 for i in main.activeNodes:
2226 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002227 leaderN = cli.electionTestLeader()
2228 leaderList.append( leaderN )
2229 if leaderN == main.FALSE:
2230 # error in response
2231 main.log.error( "Something is wrong with " +
2232 "electionTestLeader function, check the" +
2233 " error logs" )
2234 leaderResult = main.FALSE
2235 elif leaderN is None:
2236 main.log.error( cli.name +
2237                                  " shows no leader was elected for the" +
2238                                  " election-app after the old one died" )
2239 leaderResult = main.FALSE
2240 elif leaderN in restarted:
2241 main.log.error( cli.name + " shows " + str( leaderN ) +
2242 " as leader for the election-app, but it " +
2243 "was restarted" )
2244 leaderResult = main.FALSE
2245 if len( set( leaderList ) ) != 1:
2246 leaderResult = main.FALSE
2247 main.log.error(
2248 "Inconsistent view of leader for the election test app" )
2249 # TODO: print the list
2250 utilities.assert_equals(
2251 expect=main.TRUE,
2252 actual=leaderResult,
2253 onpass="Leadership election passed",
2254 onfail="Something went wrong with Leadership election" )
2255
2256 def CASE8( self, main ):
2257 """
2258 Compare topo
2259 """
2260 import json
2261 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002262 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002263 assert main, "main not defined"
2264 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002265 assert main.CLIs, "main.CLIs not defined"
2266 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002267
2268 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002269 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002270 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002271 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002272         topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002273 elapsed = 0
2274 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002275 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002276 startTime = time.time()
2277 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002278 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002279 devicesResults = main.TRUE
2280 linksResults = main.TRUE
2281 hostsResults = main.TRUE
2282 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002283 count += 1
2284 cliStart = time.time()
2285 devices = []
2286 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002287 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002288 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002289 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002290 args=[ main.CLIs[i].devices, [ None ] ],
2291 kwargs= { 'sleep': 5, 'attempts': 5,
2292 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002293 threads.append( t )
2294 t.start()
2295
2296 for t in threads:
2297 t.join()
2298 devices.append( t.result )
2299 hosts = []
2300 ipResult = main.TRUE
2301 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002302 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002303 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002304 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002305 args=[ main.CLIs[i].hosts, [ None ] ],
2306 kwargs= { 'sleep': 5, 'attempts': 5,
2307 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002308 threads.append( t )
2309 t.start()
2310
2311 for t in threads:
2312 t.join()
2313 try:
2314 hosts.append( json.loads( t.result ) )
2315 except ( ValueError, TypeError ):
2316 main.log.exception( "Error parsing hosts results" )
2317 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002318 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002319 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002320 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002321 if hosts[ controller ]:
2322 for host in hosts[ controller ]:
2323 if host is None or host.get( 'ipAddresses', [] ) == []:
2324 main.log.error(
2325 "Error with host ipAddresses on controller" +
2326 controllerStr + ": " + str( host ) )
2327 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002328 ports = []
2329 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002330 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002331 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002332 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002333 args=[ main.CLIs[i].ports, [ None ] ],
2334 kwargs= { 'sleep': 5, 'attempts': 5,
2335 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002336 threads.append( t )
2337 t.start()
2338
2339 for t in threads:
2340 t.join()
2341 ports.append( t.result )
2342 links = []
2343 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002344 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002345 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002346 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002347 args=[ main.CLIs[i].links, [ None ] ],
2348 kwargs= { 'sleep': 5, 'attempts': 5,
2349 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002350 threads.append( t )
2351 t.start()
2352
2353 for t in threads:
2354 t.join()
2355 links.append( t.result )
2356 clusters = []
2357 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002358 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002359 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002360 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002361 args=[ main.CLIs[i].clusters, [ None ] ],
2362 kwargs= { 'sleep': 5, 'attempts': 5,
2363 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002364 threads.append( t )
2365 t.start()
2366
2367 for t in threads:
2368 t.join()
2369 clusters.append( t.result )
2370
2371 elapsed = time.time() - startTime
2372 cliTime = time.time() - cliStart
2373 print "Elapsed time: " + str( elapsed )
2374 print "CLI time: " + str( cliTime )
2375
Jon Hall6e709752016-02-01 13:38:46 -08002376 if all( e is None for e in devices ) and\
2377 all( e is None for e in hosts ) and\
2378 all( e is None for e in ports ) and\
2379 all( e is None for e in links ) and\
2380 all( e is None for e in clusters ):
2381 topoFailMsg = "Could not get topology from ONOS"
2382 main.log.error( topoFailMsg )
2383 continue # Try again, No use trying to compare
2384
Jon Hall5cf14d52015-07-16 12:15:19 -07002385 mnSwitches = main.Mininet1.getSwitches()
2386 mnLinks = main.Mininet1.getLinks()
2387 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002388 for controller in range( len( main.activeNodes ) ):
2389 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002390 if devices[ controller ] and ports[ controller ] and\
2391 "Error" not in devices[ controller ] and\
2392 "Error" not in ports[ controller ]:
2393
Jon Hallc6793552016-01-19 14:18:37 -08002394 try:
2395 currentDevicesResult = main.Mininet1.compareSwitches(
2396 mnSwitches,
2397 json.loads( devices[ controller ] ),
2398 json.loads( ports[ controller ] ) )
2399 except ( TypeError, ValueError ) as e:
2400 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2401 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002402 else:
2403 currentDevicesResult = main.FALSE
2404 utilities.assert_equals( expect=main.TRUE,
2405 actual=currentDevicesResult,
2406 onpass="ONOS" + controllerStr +
2407 " Switches view is correct",
2408 onfail="ONOS" + controllerStr +
2409 " Switches view is incorrect" )
2410
2411 if links[ controller ] and "Error" not in links[ controller ]:
2412 currentLinksResult = main.Mininet1.compareLinks(
2413 mnSwitches, mnLinks,
2414 json.loads( links[ controller ] ) )
2415 else:
2416 currentLinksResult = main.FALSE
2417 utilities.assert_equals( expect=main.TRUE,
2418 actual=currentLinksResult,
2419 onpass="ONOS" + controllerStr +
2420 " links view is correct",
2421 onfail="ONOS" + controllerStr +
2422 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002423 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002424 currentHostsResult = main.Mininet1.compareHosts(
2425 mnHosts,
2426 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002427 elif hosts[ controller ] == []:
2428 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002429 else:
2430 currentHostsResult = main.FALSE
2431 utilities.assert_equals( expect=main.TRUE,
2432 actual=currentHostsResult,
2433 onpass="ONOS" + controllerStr +
2434 " hosts exist in Mininet",
2435 onfail="ONOS" + controllerStr +
2436 " hosts don't match Mininet" )
2437 # CHECKING HOST ATTACHMENT POINTS
2438 hostAttachment = True
2439 zeroHosts = False
2440 # FIXME: topo-HA/obelisk specific mappings:
2441 # key is mac and value is dpid
2442 mappings = {}
2443 for i in range( 1, 29 ): # hosts 1 through 28
2444 # set up correct variables:
2445 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2446 if i == 1:
2447 deviceId = "1000".zfill(16)
2448 elif i == 2:
2449 deviceId = "2000".zfill(16)
2450 elif i == 3:
2451 deviceId = "3000".zfill(16)
2452 elif i == 4:
2453 deviceId = "3004".zfill(16)
2454 elif i == 5:
2455 deviceId = "5000".zfill(16)
2456 elif i == 6:
2457 deviceId = "6000".zfill(16)
2458 elif i == 7:
2459 deviceId = "6007".zfill(16)
2460 elif i >= 8 and i <= 17:
2461 dpid = '3' + str( i ).zfill( 3 )
2462 deviceId = dpid.zfill(16)
2463 elif i >= 18 and i <= 27:
2464 dpid = '6' + str( i ).zfill( 3 )
2465 deviceId = dpid.zfill(16)
2466 elif i == 28:
2467 deviceId = "2800".zfill(16)
2468 mappings[ macId ] = deviceId
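                # A hypothetical, more compact way to build the same obelisk
                # mapping (kept as a comment; the explicit chain above remains
                # the reference):
                #     special = { 1: "1000", 2: "2000", 3: "3000", 4: "3004",
                #                 5: "5000", 6: "6000", 7: "6007", 28: "2800" }
                #     dpid = special.get( i, ( '3' if i <= 17 else '6' ) +
                #                            str( i ).zfill( 3 ) )
                #     mappings[ macId ] = dpid.zfill( 16 )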
Jon Halld8f6de82015-12-17 17:04:34 -08002469 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002470 if hosts[ controller ] == []:
2471 main.log.warn( "There are no hosts discovered" )
2472 zeroHosts = True
2473 else:
2474 for host in hosts[ controller ]:
2475 mac = None
2476 location = None
2477 device = None
2478 port = None
2479 try:
2480 mac = host.get( 'mac' )
2481 assert mac, "mac field could not be found for this host object"
2482
2483 location = host.get( 'location' )
2484 assert location, "location field could not be found for this host object"
2485
2486 # Trim the protocol identifier off deviceId
2487 device = str( location.get( 'elementId' ) ).split(':')[1]
2488 assert device, "elementId field could not be found for this host location object"
2489
2490 port = location.get( 'port' )
2491 assert port, "port field could not be found for this host location object"
2492
2493 # Now check if this matches where they should be
2494 if mac and device and port:
2495 if str( port ) != "1":
2496 main.log.error( "The attachment port is incorrect for " +
2497 "host " + str( mac ) +
2498 ". Expected: 1 Actual: " + str( port) )
2499 hostAttachment = False
2500 if device != mappings[ str( mac ) ]:
2501 main.log.error( "The attachment device is incorrect for " +
2502 "host " + str( mac ) +
2503 ". Expected: " + mappings[ str( mac ) ] +
2504 " Actual: " + device )
2505 hostAttachment = False
2506 else:
2507 hostAttachment = False
2508 except AssertionError:
2509 main.log.exception( "Json object not as expected" )
2510 main.log.error( repr( host ) )
2511 hostAttachment = False
2512 else:
2513 main.log.error( "No hosts json output or \"Error\"" +
2514 " in output. hosts = " +
2515 repr( hosts[ controller ] ) )
2516 if zeroHosts is False:
2517 hostAttachment = True
2518
2519 # END CHECKING HOST ATTACHMENT POINTS
2520 devicesResults = devicesResults and currentDevicesResult
2521 linksResults = linksResults and currentLinksResult
2522 hostsResults = hostsResults and currentHostsResult
2523 hostAttachmentResults = hostAttachmentResults and\
2524 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002525 topoResult = ( devicesResults and linksResults
2526 and hostsResults and ipResult and
2527 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002528 utilities.assert_equals( expect=True,
2529 actual=topoResult,
2530 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002531 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002532 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002533
2534 # Compare json objects for hosts and dataplane clusters
2535
2536 # hosts
2537 main.step( "Hosts view is consistent across all ONOS nodes" )
2538 consistentHostsResult = main.TRUE
2539 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002540 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002541 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002542 if hosts[ controller ] == hosts[ 0 ]:
2543 continue
2544 else: # hosts not consistent
2545 main.log.error( "hosts from ONOS" + controllerStr +
2546                                      " are inconsistent with ONOS1" )
2547 main.log.warn( repr( hosts[ controller ] ) )
2548 consistentHostsResult = main.FALSE
2549
2550 else:
2551 main.log.error( "Error in getting ONOS hosts from ONOS" +
2552 controllerStr )
2553 consistentHostsResult = main.FALSE
2554 main.log.warn( "ONOS" + controllerStr +
2555 " hosts response: " +
2556 repr( hosts[ controller ] ) )
2557 utilities.assert_equals(
2558 expect=main.TRUE,
2559 actual=consistentHostsResult,
2560 onpass="Hosts view is consistent across all ONOS nodes",
2561 onfail="ONOS nodes have different views of hosts" )
2562
2563 main.step( "Hosts information is correct" )
2564 hostsResults = hostsResults and ipResult
2565 utilities.assert_equals(
2566 expect=main.TRUE,
2567 actual=hostsResults,
2568 onpass="Host information is correct",
2569 onfail="Host information is incorrect" )
2570
2571 main.step( "Host attachment points to the network" )
2572 utilities.assert_equals(
2573 expect=True,
2574 actual=hostAttachmentResults,
2575 onpass="Hosts are correctly attached to the network",
2576 onfail="ONOS did not correctly attach hosts to the network" )
2577
2578 # Strongly connected clusters of devices
2579 main.step( "Clusters view is consistent across all ONOS nodes" )
2580 consistentClustersResult = main.TRUE
2581 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002582 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002583 if "Error" not in clusters[ controller ]:
2584 if clusters[ controller ] == clusters[ 0 ]:
2585 continue
2586 else: # clusters not consistent
2587 main.log.error( "clusters from ONOS" +
2588 controllerStr +
2589                                      " are inconsistent with ONOS1" )
2590 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002591 else:
2592 main.log.error( "Error in getting dataplane clusters " +
2593 "from ONOS" + controllerStr )
2594 consistentClustersResult = main.FALSE
2595 main.log.warn( "ONOS" + controllerStr +
2596 " clusters response: " +
2597 repr( clusters[ controller ] ) )
2598 utilities.assert_equals(
2599 expect=main.TRUE,
2600 actual=consistentClustersResult,
2601 onpass="Clusters view is consistent across all ONOS nodes",
2602 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002603 if not consistentClustersResult:
2604 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07002605
2606 main.step( "There is only one SCC" )
2607 # there should always only be one cluster
2608 try:
2609 numClusters = len( json.loads( clusters[ 0 ] ) )
2610 except ( ValueError, TypeError ):
2611 main.log.exception( "Error parsing clusters[0]: " +
2612 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002613 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002614 clusterResults = main.FALSE
2615 if numClusters == 1:
2616 clusterResults = main.TRUE
2617 utilities.assert_equals(
2618 expect=1,
2619 actual=numClusters,
2620 onpass="ONOS shows 1 SCC",
2621 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2622
2623 topoResult = ( devicesResults and linksResults
2624 and hostsResults and consistentHostsResult
2625 and consistentClustersResult and clusterResults
2626 and ipResult and hostAttachmentResults )
2627
2628 topoResult = topoResult and int( count <= 2 )
2629 note = "note it takes about " + str( int( cliTime ) ) + \
2630 " seconds for the test to make all the cli calls to fetch " +\
2631 "the topology from each ONOS instance"
2632 main.log.info(
2633 "Very crass estimate for topology discovery/convergence( " +
2634 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2635 str( count ) + " tries" )
2636
2637 main.step( "Device information is correct" )
2638 utilities.assert_equals(
2639 expect=main.TRUE,
2640 actual=devicesResults,
2641 onpass="Device information is correct",
2642 onfail="Device information is incorrect" )
2643
2644 main.step( "Links are correct" )
2645 utilities.assert_equals(
2646 expect=main.TRUE,
2647 actual=linksResults,
2648             onpass="Links are correct",
2649 onfail="Links are incorrect" )
2650
Jon Halla440e872016-03-31 15:15:50 -07002651 main.step( "Hosts are correct" )
2652 utilities.assert_equals(
2653 expect=main.TRUE,
2654 actual=hostsResults,
2655 onpass="Hosts are correct",
2656 onfail="Hosts are incorrect" )
2657
Jon Hall5cf14d52015-07-16 12:15:19 -07002658 # FIXME: move this to an ONOS state case
2659 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002660 nodeResults = utilities.retry( main.HA.nodesCheck,
2661 False,
2662 args=[main.activeNodes],
2663 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002664
Jon Hall41d39f12016-04-11 22:54:35 -07002665 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002666 onpass="Nodes check successful",
2667 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002668 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002669 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002670 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002671 main.CLIs[i].name,
2672 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002673
Jon Halld2871c22016-07-26 11:01:14 -07002674 if not topoResult:
2675 main.cleanup()
2676 main.exit()
2677
Jon Hall5cf14d52015-07-16 12:15:19 -07002678 def CASE9( self, main ):
2679 """
2680 Link s3-s28 down
2681 """
2682 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002683 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002684 assert main, "main not defined"
2685 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002686 assert main.CLIs, "main.CLIs not defined"
2687 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002688 # NOTE: You should probably run a topology check after this
2689
2690 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2691
2692 description = "Turn off a link to ensure that Link Discovery " +\
2693 "is working properly"
2694 main.case( description )
2695
2696 main.step( "Kill Link between s3 and s28" )
2697 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2698 main.log.info( "Waiting " + str( linkSleep ) +
2699 " seconds for link down to be discovered" )
2700 time.sleep( linkSleep )
2701 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2702 onpass="Link down successful",
2703 onfail="Failed to bring link down" )
2704 # TODO do some sort of check here
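        # A sketch of what such a check could look like (hypothetical, not
        # executed; it would also need an import of json): query one active
        # node and confirm the reported link count dropped by two, one link
        # object per direction.
        #     onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        #     linksJson = json.loads( onosCli.links() )
        #     main.log.debug( "ONOS reports " + str( len( linksJson ) ) +
        #                     " links after taking s3-s28 down" )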
2705
2706 def CASE10( self, main ):
2707 """
2708 Link s3-s28 up
2709 """
2710 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002711 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 assert main, "main not defined"
2713 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002714 assert main.CLIs, "main.CLIs not defined"
2715 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002716 # NOTE: You should probably run a topology check after this
2717
2718 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2719
2720 description = "Restore a link to ensure that Link Discovery is " + \
2721 "working properly"
2722 main.case( description )
2723
2724 main.step( "Bring link between s3 and s28 back up" )
2725 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2726 main.log.info( "Waiting " + str( linkSleep ) +
2727 " seconds for link up to be discovered" )
2728 time.sleep( linkSleep )
2729 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2730 onpass="Link up successful",
2731 onfail="Failed to bring link up" )
2732 # TODO do some sort of check here
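        # Sketch of how the TODO above could be closed with the same
        # utilities.retry pattern used earlier in this test: poll until the
        # pre-failure link count is reported again. getLinkCount is a
        # hypothetical callable (e.g. something that parses a node's "links"
        # output); it is not defined by this test, so the wiring is left
        # commented out.
        def linksRestored( getLinkCount, expectedLinks ):
            """Return True once getLinkCount() reports expectedLinks again."""
            return getLinkCount() == expectedLinks
        # restored = utilities.retry( linksRestored, False,
        #                             args=[ getLinkCount, expectedLinks ],
        #                             attempts=5 )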
2733
2734 def CASE11( self, main ):
2735 """
2736 Switch Down
2737 """
2738 # NOTE: You should probably run a topology check after this
2739 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002740 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002741 assert main, "main not defined"
2742 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002743 assert main.CLIs, "main.CLIs not defined"
2744 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002745
2746 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2747
2748 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002749 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002750 main.case( description )
2751 switch = main.params[ 'kill' ][ 'switch' ]
2752 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2753
2754 # TODO: Make this switch parameterizable
2755 main.step( "Kill " + switch )
2756 main.log.info( "Deleting " + switch )
2757 main.Mininet1.delSwitch( switch )
2758 main.log.info( "Waiting " + str( switchSleep ) +
2759 " seconds for switch down to be discovered" )
2760 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002761 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002762 # Peek at the deleted switch
2763 main.log.warn( str( device ) )
2764 result = main.FALSE
2765 if device and device[ 'available' ] is False:
2766 result = main.TRUE
2767 utilities.assert_equals( expect=main.TRUE, actual=result,
2768 onpass="Kill switch successful",
2769 onfail="Failed to kill switch?" )
2770
2771 def CASE12( self, main ):
2772 """
2773 Switch Up
2774 """
2775 # NOTE: You should probably run a topology check after this
2776 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002777 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002778 assert main, "main not defined"
2779 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002780 assert main.CLIs, "main.CLIs not defined"
2781 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002782 assert ONOS1Port, "ONOS1Port not defined"
2783 assert ONOS2Port, "ONOS2Port not defined"
2784 assert ONOS3Port, "ONOS3Port not defined"
2785 assert ONOS4Port, "ONOS4Port not defined"
2786 assert ONOS5Port, "ONOS5Port not defined"
2787 assert ONOS6Port, "ONOS6Port not defined"
2788 assert ONOS7Port, "ONOS7Port not defined"
2789
2790 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2791 switch = main.params[ 'kill' ][ 'switch' ]
2792 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2793 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002794 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002795 description = "Adding a switch to ensure it is discovered correctly"
2796 main.case( description )
2797
2798 main.step( "Add back " + switch )
2799 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2800 for peer in links:
2801 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002802 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002803 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2804 main.log.info( "Waiting " + str( switchSleep ) +
2805 " seconds for switch up to be discovered" )
2806 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002807 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002808 # Peek at the deleted switch
2809 main.log.warn( str( device ) )
2810 result = main.FALSE
2811 if device and device[ 'available' ]:
2812 result = main.TRUE
2813 utilities.assert_equals( expect=main.TRUE, actual=result,
2814 onpass="add switch successful",
2815 onfail="Failed to add switch?" )
2816
2817 def CASE13( self, main ):
2818 """
2819 Clean up
2820 """
2821 import os
2822 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002823 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002824 assert main, "main not defined"
2825 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002826 assert main.CLIs, "main.CLIs not defined"
2827 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002828
2829 # printing colors to terminal
2830 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2831 'blue': '\033[94m', 'green': '\033[92m',
2832 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2833 main.case( "Test Cleanup" )
2834 main.step( "Killing tcpdumps" )
2835 main.Mininet2.stopTcpdump()
2836
2837 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002838 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002839 main.step( "Copying MN pcap and ONOS log files to test station" )
2840 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2841 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002842 # NOTE: MN Pcap file is being saved to logdir.
2843 # We scp this file as MN and TestON aren't necessarily the same vm
2844
2845 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002846 # TODO: Load these from params
2847 # NOTE: must end in /
2848 logFolder = "/opt/onos/log/"
2849 logFiles = [ "karaf.log", "karaf.log.1" ]
2850 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002851 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002852 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002853 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002854 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2855 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002856 # std*.log's
2857 # NOTE: must end in /
2858 logFolder = "/opt/onos/var/"
2859 logFiles = [ "stderr.log", "stdout.log" ]
2860 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002861 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002862 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002863 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002864 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2865 logFolder + f, dstName )
2866 else:
2867 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002868
2869 main.step( "Stopping Mininet" )
2870 mnResult = main.Mininet1.stopNet()
2871 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2872 onpass="Mininet stopped",
2873 onfail="MN cleanup NOT successful" )
2874
2875 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002876 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002877 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2878 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002879
2880 try:
2881 timerLog = open( main.logdir + "/Timers.csv", 'w')
2882 # Overwrite with empty line and close
2883 labels = "Gossip Intents, Restart"
2884 data = str( gossipTime ) + ", " + str( main.restartTime )
2885 timerLog.write( labels + "\n" + data )
2886 timerLog.close()
2887 except NameError, e:
2888 main.log.exception(e)
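        # An equivalent csv-module version of the timer dump above (sketch
        # only, not wired in; it assumes the same gossipTime and
        # main.restartTime values are in scope):
        def writeTimerCsv( path, headers, values ):
            """Write one header row and one data row to a fresh csv file."""
            import csv
            with open( path, 'wb' ) as csvFile:  # 'wb' since this test targets Python 2
                writer = csv.writer( csvFile )
                writer.writerow( headers )
                writer.writerow( values )
        # e.g. writeTimerCsv( main.logdir + "/Timers.csv",
        #                     [ "Gossip Intents", "Restart" ],
        #                     [ gossipTime, main.restartTime ] )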
2889
2890 def CASE14( self, main ):
2891 """
2892 start election app on all onos nodes
2893 """
Jon Halle1a3b752015-07-22 13:02:46 -07002894 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002895 assert main, "main not defined"
2896 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002897 assert main.CLIs, "main.CLIs not defined"
2898 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002899
2900 main.case( "Start Leadership Election app" )
2901 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002902 onosCli = main.CLIs[ main.activeNodes[0] ]
2903 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002904 utilities.assert_equals(
2905 expect=main.TRUE,
2906 actual=appResult,
2907 onpass="Election app installed",
2908 onfail="Something went wrong with installing Leadership election" )
2909
2910 main.step( "Run for election on each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002911 for i in main.activeNodes:
2912 main.CLIs[i].electionTestRun()
import time  # CASE14 has no local "import time"; needed for the sleep below
Jon Hall25463a82016-04-13 14:03:52 -07002913 time.sleep( 5 )
2914 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2915 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002916 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002917 expect=True,
2918 actual=sameResult,
2919 onpass="All nodes see the same leaderboards",
2920 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002921
Jon Hall25463a82016-04-13 14:03:52 -07002922 if sameResult:
2923 leader = leaders[ 0 ][ 0 ]
2924 if main.nodes[main.activeNodes[0]].ip_address in leader:
2925 correctLeader = True
2926 else:
2927 correctLeader = False
2928 main.step( "First node was elected leader" )
2929 utilities.assert_equals(
2930 expect=True,
2931 actual=correctLeader,
2932 onpass="Correct leader was elected",
2933 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002934
2935 def CASE15( self, main ):
2936 """
2937 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002938 15.1 Run election on each node
2939 15.2 Check that each node has the same leaders and candidates
2940 15.3 Find current leader and withdraw
2941 15.4 Check that a new node was elected leader
2942 15.5 Check that the new leader was the candidate of the old leader
2943 15.6 Run for election on old leader
2944 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2945 15.8 Make sure that the old leader was added to the candidate list
2946
2947 old and new variable prefixes refer to data from before vs after
2948 withdrawal, and later to data from before vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002949 """
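        # A toy model of what steps 15.3-15.8 expect from the election app
        # (an assumption about its candidate-queue semantics, shown only to
        # make the checks below easier to follow): with leaderboard
        # [ A, B, C ], A withdrawing should promote B, and A re-running should
        # rejoin at the tail, giving [ B, C, A ].
        def expectedBoardAfterWithdrawAndRerun( board ):
            """Rotate the head of a leaderboard list to the tail."""
            return board[ 1: ] + board[ :1 ]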
2950 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002951 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002952 assert main, "main not defined"
2953 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002954 assert main.CLIs, "main.CLIs not defined"
2955 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002956
Jon Hall5cf14d52015-07-16 12:15:19 -07002957 description = "Check that Leadership Election is still functional"
2958 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002959 # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002960
Jon Halla440e872016-03-31 15:15:50 -07002961 oldLeaders = [] # list of lists of each node's candidates before
2962 newLeaders = [] # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002963 oldLeader = '' # the old leader from oldLeaders, None if not same
2964 newLeader = '' # the new leader from newLeaders, None if not same
2965 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2966 expectNoLeader = False # True when there is only one leader
2967 if main.numCtrls == 1:
2968 expectNoLeader = True
2969
2970 main.step( "Run for election on each node" )
2971 electionResult = main.TRUE
2972
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002973 for i in main.activeNodes: # run test election on each node
2974 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002975 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002976 utilities.assert_equals(
2977 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002978 actual=electionResult,
2979 onpass="All nodes successfully ran for leadership",
2980 onfail="At least one node failed to run for leadership" )
2981
acsmars3a72bde2015-09-02 14:16:22 -07002982 if electionResult == main.FALSE:
2983 main.log.error(
2984 "Skipping Test Case because Election Test App isn't loaded" )
2985 main.skipCase()
2986
acsmars71adceb2015-08-31 15:09:26 -07002987 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002988 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07002989 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07002990 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07002991 if sameResult:
2992 oldLeader = oldLeaders[ 0 ][ 0 ]
2993 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002994 else:
Jon Halla440e872016-03-31 15:15:50 -07002995 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002996 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002997 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002998 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002999 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07003000 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07003001
3002 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07003003 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07003004 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07003005 if oldLeader is None:
3006 main.log.error( "Leadership isn't consistent." )
3007 withdrawResult = main.FALSE
3008 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003009 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07003010 if oldLeader == main.nodes[ i ].ip_address:
3011 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003012 break
3013 else: # FOR/ELSE statement
3014 main.log.error( "Leader election, could not find current leader" )
3015 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07003016 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07003017 utilities.assert_equals(
3018 expect=main.TRUE,
3019 actual=withdrawResult,
3020 onpass="Node was withdrawn from election",
3021 onfail="Node was not withdrawn from election" )
3022
acsmars71adceb2015-08-31 15:09:26 -07003023 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07003024 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07003025 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07003026 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07003027 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07003028 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07003029 if newLeaders[ 0 ][ 0 ] == 'none':
3030 main.log.error( "No leader was elected on at least 1 node" )
3031 if not expectNoLeader:
3032 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07003033 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07003034
3035 # Check that the new leader is not the older leader, which was withdrawn
3036 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003037 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003038 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07003039 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003040 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003041 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003042 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003043 onpass="Leadership election passed",
3044 onfail="Something went wrong with Leadership election" )
3045
Jon Halla440e872016-03-31 15:15:50 -07003046 main.step( "Check that the new leader was the candidate of the old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003047 # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07003048 correctCandidateResult = main.TRUE
3049 if expectNoLeader:
3050 if newLeader == 'none':
3051 main.log.info( "No leader expected. None found. Pass" )
3052 correctCandidateResult = main.TRUE
3053 else:
3054 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3055 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003056 elif len( oldLeaders[0] ) >= 3:
3057 if newLeader == oldLeaders[ 0 ][ 2 ]:
3058 # correct leader was elected
3059 correctCandidateResult = main.TRUE
3060 else:
3061 correctCandidateResult = main.FALSE
3062 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3063 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003064 else:
3065 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003066 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003067 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003068 utilities.assert_equals(
3069 expect=main.TRUE,
3070 actual=correctCandidateResult,
3071 onpass="Correct Candidate Elected",
3072 onfail="Incorrect Candidate Elected" )
3073
Jon Hall5cf14d52015-07-16 12:15:19 -07003074 main.step( "Run for election on old leader( just so everyone " +
3075 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003076 if oldLeaderCLI is not None:
3077 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003078 else:
acsmars71adceb2015-08-31 15:09:26 -07003079 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003080 runResult = main.FALSE
3081 utilities.assert_equals(
3082 expect=main.TRUE,
3083 actual=runResult,
3084 onpass="App re-ran for election",
3085 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003086
acsmars71adceb2015-08-31 15:09:26 -07003087 main.step(
3088 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003089 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003090 # Get new leaders and candidates
3091 reRunLeaders = []
3092 time.sleep( 5 ) # TODO: parameterize this sleep
Jon Hall41d39f12016-04-11 22:54:35 -07003093 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003094
3095 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07003096 if not reRunLeaders[0]:
3097 positionResult = main.FALSE
3098 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07003099 main.log.error( "Old Leader ({}) not in the proper position: {}".format( str( oldLeader ),
3100 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003101 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003102 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003103 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003104 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003105 onpass="Old leader successfully re-ran for election",
3106 onfail="Something went wrong with Leadership election after " +
3107 "the old leader re-ran for election" )
3108
3109 def CASE16( self, main ):
3110 """
3111 Install Distributed Primitives app
3112 """
3113 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003114 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003115 assert main, "main not defined"
3116 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003117 assert main.CLIs, "main.CLIs not defined"
3118 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003119
3120 # Variables for the distributed primitives tests
3121 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003122 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003123 global onosSet
3124 global onosSetName
3125 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003126 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003127 onosSet = set([])
3128 onosSetName = "TestON-set"
3129
3130 description = "Install Primitives app"
3131 main.case( description )
3132 main.step( "Install Primitives app" )
3133 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003134 node = main.activeNodes[0]
3135 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003136 utilities.assert_equals( expect=main.TRUE,
3137 actual=appResults,
3138 onpass="Primitives app activated",
3139 onfail="Primitives app not activated" )
3140 time.sleep( 5 ) # To allow all nodes to activate
3141
3142 def CASE17( self, main ):
3143 """
3144 Check for basic functionality with distributed primitives
3145 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003146 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003147 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003148 assert main, "main not defined"
3149 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003150 assert main.CLIs, "main.CLIs not defined"
3151 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003152 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003153 assert onosSetName, "onosSetName not defined"
3154 # NOTE: assert fails if value is 0/None/Empty/False
3155 try:
3156 pCounterValue
3157 except NameError:
3158 main.log.error( "pCounterValue not defined, setting to 0" )
3159 pCounterValue = 0
3160 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003161 onosSet
3162 except NameError:
3163 main.log.error( "onosSet not defined, setting to empty Set" )
3164 onosSet = set([])
3165 # Variables for the distributed primitives tests. These are local only
3166 addValue = "a"
3167 addAllValue = "a b c d e f"
3168 retainValue = "c d e f"
3169
3170 description = "Check for basic functionality with distributed " +\
3171 "primitives"
3172 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003173 main.caseExplanation = "Test the methods of the distributed " +\
3174 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003175 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003176 # Partitioned counters
3177 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003178 pCounters = []
3179 threads = []
3180 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003181 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003182 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3183 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003184 args=[ pCounterName ] )
3185 pCounterValue += 1
3186 addedPValues.append( pCounterValue )
3187 threads.append( t )
3188 t.start()
3189
3190 for t in threads:
3191 t.join()
3192 pCounters.append( t.result )
3193 # Check that counter incremented numController times
3194 pCounterResults = True
3195 for i in addedPValues:
3196 tmpResult = i in pCounters
3197 pCounterResults = pCounterResults and tmpResult
3198 if not tmpResult:
3199 main.log.error( str( i ) + " is not in partitioned "
3200 "counter incremented results" )
3201 utilities.assert_equals( expect=True,
3202 actual=pCounterResults,
3203 onpass="Default counter incremented",
3204 onfail="Error incrementing default" +
3205 " counter" )
3206
Jon Halle1a3b752015-07-22 13:02:46 -07003207 main.step( "Get then Increment a default counter on each node" )
3208 pCounters = []
3209 threads = []
3210 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003211 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003212 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3213 name="counterGetAndAdd-" + str( i ),
3214 args=[ pCounterName ] )
3215 addedPValues.append( pCounterValue )
3216 pCounterValue += 1
3217 threads.append( t )
3218 t.start()
3219
3220 for t in threads:
3221 t.join()
3222 pCounters.append( t.result )
3223 # Check that counter incremented numController times
3224 pCounterResults = True
3225 for i in addedPValues:
3226 tmpResult = i in pCounters
3227 pCounterResults = pCounterResults and tmpResult
3228 if not tmpResult:
3229 main.log.error( str( i ) + " is not in partitioned "
3230 "counter incremented results" )
3231 utilities.assert_equals( expect=True,
3232 actual=pCounterResults,
3233 onpass="Default counter incremented",
3234 onfail="Error incrementing default" +
3235 " counter" )
3236
3237 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003238 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003239 utilities.assert_equals( expect=main.TRUE,
3240 actual=incrementCheck,
3241 onpass="Added counters are correct",
3242 onfail="Added counters are incorrect" )
3243
3244 main.step( "Add -8 to then get a default counter on each node" )
3245 pCounters = []
3246 threads = []
3247 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003248 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003249 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3250 name="counterIncrement-" + str( i ),
3251 args=[ pCounterName ],
3252 kwargs={ "delta": -8 } )
3253 pCounterValue += -8
3254 addedPValues.append( pCounterValue )
3255 threads.append( t )
3256 t.start()
3257
3258 for t in threads:
3259 t.join()
3260 pCounters.append( t.result )
3261 # Check that counter incremented numController times
3262 pCounterResults = True
3263 for i in addedPValues:
3264 tmpResult = i in pCounters
3265 pCounterResults = pCounterResults and tmpResult
3266 if not tmpResult:
3267 main.log.error( str( i ) + " is not in partitioned "
3268 "counter incremented results" )
3269 utilities.assert_equals( expect=True,
3270 actual=pCounterResults,
3271 onpass="Default counter incremented",
3272 onfail="Error incrementing default" +
3273 " counter" )
3274
3275 main.step( "Add 5 to then get a default counter on each node" )
3276 pCounters = []
3277 threads = []
3278 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003279 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003280 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3281 name="counterIncrement-" + str( i ),
3282 args=[ pCounterName ],
3283 kwargs={ "delta": 5 } )
3284 pCounterValue += 5
3285 addedPValues.append( pCounterValue )
3286 threads.append( t )
3287 t.start()
3288
3289 for t in threads:
3290 t.join()
3291 pCounters.append( t.result )
3292 # Check that counter incremented numController times
3293 pCounterResults = True
3294 for i in addedPValues:
3295 tmpResult = i in pCounters
3296 pCounterResults = pCounterResults and tmpResult
3297 if not tmpResult:
3298 main.log.error( str( i ) + " is not in partitioned "
3299 "counter incremented results" )
3300 utilities.assert_equals( expect=True,
3301 actual=pCounterResults,
3302 onpass="Default counter incremented",
3303 onfail="Error incrementing default" +
3304 " counter" )
3305
3306 main.step( "Get then add 5 to a default counter on each node" )
3307 pCounters = []
3308 threads = []
3309 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003310 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003311 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3312 name="counterIncrement-" + str( i ),
3313 args=[ pCounterName ],
3314 kwargs={ "delta": 5 } )
3315 addedPValues.append( pCounterValue )
3316 pCounterValue += 5
3317 threads.append( t )
3318 t.start()
3319
3320 for t in threads:
3321 t.join()
3322 pCounters.append( t.result )
3323 # Check that counter incremented numController times
3324 pCounterResults = True
3325 for i in addedPValues:
3326 tmpResult = i in pCounters
3327 pCounterResults = pCounterResults and tmpResult
3328 if not tmpResult:
3329 main.log.error( str( i ) + " is not in partitioned "
3330 "counter incremented results" )
3331 utilities.assert_equals( expect=True,
3332 actual=pCounterResults,
3333 onpass="Default counter incremented",
3334 onfail="Error incrementing default" +
3335 " counter" )
3336
3337 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003338 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003339 utilities.assert_equals( expect=main.TRUE,
3340 actual=incrementCheck,
3341 onpass="Added counters are correct",
3342 onfail="Added counters are incorrect" )
3343
Jon Hall5cf14d52015-07-16 12:15:19 -07003344 # DISTRIBUTED SETS
3345 main.step( "Distributed Set get" )
3346 size = len( onosSet )
3347 getResponses = []
3348 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003349 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003350 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003351 name="setTestGet-" + str( i ),
3352 args=[ onosSetName ] )
3353 threads.append( t )
3354 t.start()
3355 for t in threads:
3356 t.join()
3357 getResponses.append( t.result )
3358
3359 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003360 for i in range( len( main.activeNodes ) ):
3361 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003362 if isinstance( getResponses[ i ], list):
3363 current = set( getResponses[ i ] )
3364 if len( current ) == len( getResponses[ i ] ):
3365 # no repeats
3366 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003367 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003368 " has incorrect view" +
3369 " of set " + onosSetName + ":\n" +
3370 str( getResponses[ i ] ) )
3371 main.log.debug( "Expected: " + str( onosSet ) )
3372 main.log.debug( "Actual: " + str( current ) )
3373 getResults = main.FALSE
3374 else:
3375 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003376 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003377 " has repeat elements in" +
3378 " set " + onosSetName + ":\n" +
3379 str( getResponses[ i ] ) )
3380 getResults = main.FALSE
3381 elif getResponses[ i ] == main.ERROR:
3382 getResults = main.FALSE
3383 utilities.assert_equals( expect=main.TRUE,
3384 actual=getResults,
3385 onpass="Set elements are correct",
3386 onfail="Set elements are incorrect" )
3387
3388 main.step( "Distributed Set size" )
3389 sizeResponses = []
3390 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003391 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003392 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003393 name="setTestSize-" + str( i ),
3394 args=[ onosSetName ] )
3395 threads.append( t )
3396 t.start()
3397 for t in threads:
3398 t.join()
3399 sizeResponses.append( t.result )
3400
3401 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003402 for i in range( len( main.activeNodes ) ):
3403 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003404 if size != sizeResponses[ i ]:
3405 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003406 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003407 " expected a size of " + str( size ) +
3408 " for set " + onosSetName +
3409 " but got " + str( sizeResponses[ i ] ) )
3410 utilities.assert_equals( expect=main.TRUE,
3411 actual=sizeResults,
3412 onpass="Set sizes are correct",
3413 onfail="Set sizes are incorrect" )
3414
3415 main.step( "Distributed Set add()" )
3416 onosSet.add( addValue )
3417 addResponses = []
3418 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003419 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003420 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003421 name="setTestAdd-" + str( i ),
3422 args=[ onosSetName, addValue ] )
3423 threads.append( t )
3424 t.start()
3425 for t in threads:
3426 t.join()
3427 addResponses.append( t.result )
3428
3429 # main.TRUE = successfully changed the set
3430 # main.FALSE = action resulted in no change in set
3431 # main.ERROR - Some error in executing the function
3432 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003433 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003434 if addResponses[ i ] == main.TRUE:
3435 # All is well
3436 pass
3437 elif addResponses[ i ] == main.FALSE:
3438 # Already in set, probably fine
3439 pass
3440 elif addResponses[ i ] == main.ERROR:
3441 # Error in execution
3442 addResults = main.FALSE
3443 else:
3444 # unexpected result
3445 addResults = main.FALSE
3446 if addResults != main.TRUE:
3447 main.log.error( "Error executing set add" )
3448
3449 # Check if set is still correct
3450 size = len( onosSet )
3451 getResponses = []
3452 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003453 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003454 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003455 name="setTestGet-" + str( i ),
3456 args=[ onosSetName ] )
3457 threads.append( t )
3458 t.start()
3459 for t in threads:
3460 t.join()
3461 getResponses.append( t.result )
3462 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003463 for i in range( len( main.activeNodes ) ):
3464 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003465 if isinstance( getResponses[ i ], list):
3466 current = set( getResponses[ i ] )
3467 if len( current ) == len( getResponses[ i ] ):
3468 # no repeats
3469 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003470 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003471 " of set " + onosSetName + ":\n" +
3472 str( getResponses[ i ] ) )
3473 main.log.debug( "Expected: " + str( onosSet ) )
3474 main.log.debug( "Actual: " + str( current ) )
3475 getResults = main.FALSE
3476 else:
3477 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003478 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003479 " set " + onosSetName + ":\n" +
3480 str( getResponses[ i ] ) )
3481 getResults = main.FALSE
3482 elif getResponses[ i ] == main.ERROR:
3483 getResults = main.FALSE
3484 sizeResponses = []
3485 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003486 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003487 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003488 name="setTestSize-" + str( i ),
3489 args=[ onosSetName ] )
3490 threads.append( t )
3491 t.start()
3492 for t in threads:
3493 t.join()
3494 sizeResponses.append( t.result )
3495 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003496 for i in range( len( main.activeNodes ) ):
3497 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003498 if size != sizeResponses[ i ]:
3499 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003500 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003501 " expected a size of " + str( size ) +
3502 " for set " + onosSetName +
3503 " but got " + str( sizeResponses[ i ] ) )
3504 addResults = addResults and getResults and sizeResults
3505 utilities.assert_equals( expect=main.TRUE,
3506 actual=addResults,
3507 onpass="Set add correct",
3508 onfail="Set add was incorrect" )
3509
3510 main.step( "Distributed Set addAll()" )
3511 onosSet.update( addAllValue.split() )
3512 addResponses = []
3513 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003514 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003515 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003516 name="setTestAddAll-" + str( i ),
3517 args=[ onosSetName, addAllValue ] )
3518 threads.append( t )
3519 t.start()
3520 for t in threads:
3521 t.join()
3522 addResponses.append( t.result )
3523
3524 # main.TRUE = successfully changed the set
3525 # main.FALSE = action resulted in no change in set
3526 # main.ERROR - Some error in executing the function
3527 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003528 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003529 if addResponses[ i ] == main.TRUE:
3530 # All is well
3531 pass
3532 elif addResponses[ i ] == main.FALSE:
3533 # Already in set, probably fine
3534 pass
3535 elif addResponses[ i ] == main.ERROR:
3536 # Error in execution
3537 addAllResults = main.FALSE
3538 else:
3539 # unexpected result
3540 addAllResults = main.FALSE
3541 if addAllResults != main.TRUE:
3542 main.log.error( "Error executing set addAll" )
3543
3544 # Check if set is still correct
3545 size = len( onosSet )
3546 getResponses = []
3547 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003548 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003549 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003550 name="setTestGet-" + str( i ),
3551 args=[ onosSetName ] )
3552 threads.append( t )
3553 t.start()
3554 for t in threads:
3555 t.join()
3556 getResponses.append( t.result )
3557 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003558 for i in range( len( main.activeNodes ) ):
3559 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003560 if isinstance( getResponses[ i ], list):
3561 current = set( getResponses[ i ] )
3562 if len( current ) == len( getResponses[ i ] ):
3563 # no repeats
3564 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003565 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003566 " has incorrect view" +
3567 " of set " + onosSetName + ":\n" +
3568 str( getResponses[ i ] ) )
3569 main.log.debug( "Expected: " + str( onosSet ) )
3570 main.log.debug( "Actual: " + str( current ) )
3571 getResults = main.FALSE
3572 else:
3573 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003574 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003575 " has repeat elements in" +
3576 " set " + onosSetName + ":\n" +
3577 str( getResponses[ i ] ) )
3578 getResults = main.FALSE
3579 elif getResponses[ i ] == main.ERROR:
3580 getResults = main.FALSE
3581 sizeResponses = []
3582 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003583 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003584 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003585 name="setTestSize-" + str( i ),
3586 args=[ onosSetName ] )
3587 threads.append( t )
3588 t.start()
3589 for t in threads:
3590 t.join()
3591 sizeResponses.append( t.result )
3592 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003593 for i in range( len( main.activeNodes ) ):
3594 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003595 if size != sizeResponses[ i ]:
3596 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003597 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003598 " expected a size of " + str( size ) +
3599 " for set " + onosSetName +
3600 " but got " + str( sizeResponses[ i ] ) )
3601 addAllResults = addAllResults and getResults and sizeResults
3602 utilities.assert_equals( expect=main.TRUE,
3603 actual=addAllResults,
3604 onpass="Set addAll correct",
3605 onfail="Set addAll was incorrect" )
3606
3607 main.step( "Distributed Set contains()" )
3608 containsResponses = []
3609 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003610 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003611 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003612 name="setContains-" + str( i ),
3613 args=[ onosSetName ],
3614 kwargs={ "values": addValue } )
3615 threads.append( t )
3616 t.start()
3617 for t in threads:
3618 t.join()
3619 # NOTE: This is the tuple
3620 containsResponses.append( t.result )
3621
3622 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003623 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003624 if containsResponses[ i ] == main.ERROR:
3625 containsResults = main.FALSE
3626 else:
3627 containsResults = containsResults and\
3628 containsResponses[ i ][ 1 ]
3629 utilities.assert_equals( expect=main.TRUE,
3630 actual=containsResults,
3631 onpass="Set contains is functional",
3632 onfail="Set contains failed" )
3633
3634 main.step( "Distributed Set containsAll()" )
3635 containsAllResponses = []
3636 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003637 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003638 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003639 name="setContainsAll-" + str( i ),
3640 args=[ onosSetName ],
3641 kwargs={ "values": addAllValue } )
3642 threads.append( t )
3643 t.start()
3644 for t in threads:
3645 t.join()
3646 # NOTE: This is the tuple
3647 containsAllResponses.append( t.result )
3648
3649 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003650 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003651 if containsAllResponses[ i ] == main.ERROR:
3652 containsAllResults = main.FALSE
3653 else:
3654 containsAllResults = containsAllResults and\
3655 containsAllResponses[ i ][ 1 ]
3656 utilities.assert_equals( expect=main.TRUE,
3657 actual=containsAllResults,
3658 onpass="Set containsAll is functional",
3659 onfail="Set containsAll failed" )
3660
3661 main.step( "Distributed Set remove()" )
3662 onosSet.remove( addValue )
3663 removeResponses = []
3664 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003665 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003666 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003667 name="setTestRemove-" + str( i ),
3668 args=[ onosSetName, addValue ] )
3669 threads.append( t )
3670 t.start()
3671 for t in threads:
3672 t.join()
3673 removeResponses.append( t.result )
3674
3675 # main.TRUE = successfully changed the set
3676 # main.FALSE = action resulted in no change in set
3677 # main.ERROR - Some error in executing the function
3678 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003679 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003680 if removeResponses[ i ] == main.TRUE:
3681 # All is well
3682 pass
3683 elif removeResponses[ i ] == main.FALSE:
3684 # not in set, probably fine
3685 pass
3686 elif removeResponses[ i ] == main.ERROR:
3687 # Error in execution
3688 removeResults = main.FALSE
3689 else:
3690 # unexpected result
3691 removeResults = main.FALSE
3692 if removeResults != main.TRUE:
3693 main.log.error( "Error executing set remove" )
3694
3695 # Check if set is still correct
3696 size = len( onosSet )
3697 getResponses = []
3698 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003699 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003700 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003701 name="setTestGet-" + str( i ),
3702 args=[ onosSetName ] )
3703 threads.append( t )
3704 t.start()
3705 for t in threads:
3706 t.join()
3707 getResponses.append( t.result )
3708 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003709 for i in range( len( main.activeNodes ) ):
3710 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003711 if isinstance( getResponses[ i ], list):
3712 current = set( getResponses[ i ] )
3713 if len( current ) == len( getResponses[ i ] ):
3714 # no repeats
3715 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003716 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003717 " has incorrect view" +
3718 " of set " + onosSetName + ":\n" +
3719 str( getResponses[ i ] ) )
3720 main.log.debug( "Expected: " + str( onosSet ) )
3721 main.log.debug( "Actual: " + str( current ) )
3722 getResults = main.FALSE
3723 else:
3724 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003725 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003726 " has repeat elements in" +
3727 " set " + onosSetName + ":\n" +
3728 str( getResponses[ i ] ) )
3729 getResults = main.FALSE
3730 elif getResponses[ i ] == main.ERROR:
3731 getResults = main.FALSE
3732 sizeResponses = []
3733 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003734 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003735 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003736 name="setTestSize-" + str( i ),
3737 args=[ onosSetName ] )
3738 threads.append( t )
3739 t.start()
3740 for t in threads:
3741 t.join()
3742 sizeResponses.append( t.result )
3743 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003744 for i in range( len( main.activeNodes ) ):
3745 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003746 if size != sizeResponses[ i ]:
3747 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003748 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003749 " expected a size of " + str( size ) +
3750 " for set " + onosSetName +
3751 " but got " + str( sizeResponses[ i ] ) )
3752 removeResults = removeResults and getResults and sizeResults
3753 utilities.assert_equals( expect=main.TRUE,
3754 actual=removeResults,
3755 onpass="Set remove correct",
3756 onfail="Set remove was incorrect" )
3757
3758 main.step( "Distributed Set removeAll()" )
3759 onosSet.difference_update( addAllValue.split() )
3760 removeAllResponses = []
3761 threads = []
3762 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003763 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003764 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003765 name="setTestRemoveAll-" + str( i ),
3766 args=[ onosSetName, addAllValue ] )
3767 threads.append( t )
3768 t.start()
3769 for t in threads:
3770 t.join()
3771 removeAllResponses.append( t.result )
3772 except Exception, e:
3773 main.log.exception(e)
3774
3775 # main.TRUE = successfully changed the set
3776 # main.FALSE = action resulted in no change in set
3777 # main.ERROR - Some error in executing the function
3778 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003779 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003780 if removeAllResponses[ i ] == main.TRUE:
3781 # All is well
3782 pass
3783 elif removeAllResponses[ i ] == main.FALSE:
3784 # not in set, probably fine
3785 pass
3786 elif removeAllResponses[ i ] == main.ERROR:
3787 # Error in execution
3788 removeAllResults = main.FALSE
3789 else:
3790 # unexpected result
3791 removeAllResults = main.FALSE
3792 if removeAllResults != main.TRUE:
3793 main.log.error( "Error executing set removeAll" )
3794
3795 # Check if set is still correct
3796 size = len( onosSet )
3797 getResponses = []
3798 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003799 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003800 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003801 name="setTestGet-" + str( i ),
3802 args=[ onosSetName ] )
3803 threads.append( t )
3804 t.start()
3805 for t in threads:
3806 t.join()
3807 getResponses.append( t.result )
3808 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003809 for i in range( len( main.activeNodes ) ):
3810 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003811 if isinstance( getResponses[ i ], list):
3812 current = set( getResponses[ i ] )
3813 if len( current ) == len( getResponses[ i ] ):
3814 # no repeats
3815 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003816 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003817 " has incorrect view" +
3818 " of set " + onosSetName + ":\n" +
3819 str( getResponses[ i ] ) )
3820 main.log.debug( "Expected: " + str( onosSet ) )
3821 main.log.debug( "Actual: " + str( current ) )
3822 getResults = main.FALSE
3823 else:
3824 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003825 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003826 " has repeat elements in" +
3827 " set " + onosSetName + ":\n" +
3828 str( getResponses[ i ] ) )
3829 getResults = main.FALSE
3830 elif getResponses[ i ] == main.ERROR:
3831 getResults = main.FALSE
3832 sizeResponses = []
3833 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003834 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003835 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003836 name="setTestSize-" + str( i ),
3837 args=[ onosSetName ] )
3838 threads.append( t )
3839 t.start()
3840 for t in threads:
3841 t.join()
3842 sizeResponses.append( t.result )
3843 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003844 for i in range( len( main.activeNodes ) ):
3845 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003846 if size != sizeResponses[ i ]:
3847 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003848 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003849 " expected a size of " + str( size ) +
3850 " for set " + onosSetName +
3851 " but got " + str( sizeResponses[ i ] ) )
3852 removeAllResults = removeAllResults and getResults and sizeResults
3853 utilities.assert_equals( expect=main.TRUE,
3854 actual=removeAllResults,
3855 onpass="Set removeAll correct",
3856 onfail="Set removeAll was incorrect" )
3857
3858 main.step( "Distributed Set addAll()" )
3859 onosSet.update( addAllValue.split() )
3860 addResponses = []
3861 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003862 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003863 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003864 name="setTestAddAll-" + str( i ),
3865 args=[ onosSetName, addAllValue ] )
3866 threads.append( t )
3867 t.start()
3868 for t in threads:
3869 t.join()
3870 addResponses.append( t.result )
3871
3872 # main.TRUE = successfully changed the set
3873 # main.FALSE = action resulted in no change in set
3874 # main.ERROR - Some error in executing the function
3875 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003876 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003877 if addResponses[ i ] == main.TRUE:
3878 # All is well
3879 pass
3880 elif addResponses[ i ] == main.FALSE:
3881 # Already in set, probably fine
3882 pass
3883 elif addResponses[ i ] == main.ERROR:
3884 # Error in execution
3885 addAllResults = main.FALSE
3886 else:
3887 # unexpected result
3888 addAllResults = main.FALSE
3889 if addAllResults != main.TRUE:
3890 main.log.error( "Error executing set addAll" )
3891
3892 # Check if set is still correct
3893 size = len( onosSet )
3894 getResponses = []
3895 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003896 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003897 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003898 name="setTestGet-" + str( i ),
3899 args=[ onosSetName ] )
3900 threads.append( t )
3901 t.start()
3902 for t in threads:
3903 t.join()
3904 getResponses.append( t.result )
3905 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003906 for i in range( len( main.activeNodes ) ):
3907 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003908 if isinstance( getResponses[ i ], list):
3909 current = set( getResponses[ i ] )
3910 if len( current ) == len( getResponses[ i ] ):
3911 # no repeats
3912 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003913 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003914 " has incorrect view" +
3915 " of set " + onosSetName + ":\n" +
3916 str( getResponses[ i ] ) )
3917 main.log.debug( "Expected: " + str( onosSet ) )
3918 main.log.debug( "Actual: " + str( current ) )
3919 getResults = main.FALSE
3920 else:
3921 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003922 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003923 " has repeat elements in" +
3924 " set " + onosSetName + ":\n" +
3925 str( getResponses[ i ] ) )
3926 getResults = main.FALSE
3927 elif getResponses[ i ] == main.ERROR:
3928 getResults = main.FALSE
3929 sizeResponses = []
3930 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003931 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003932 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003933 name="setTestSize-" + str( i ),
3934 args=[ onosSetName ] )
3935 threads.append( t )
3936 t.start()
3937 for t in threads:
3938 t.join()
3939 sizeResponses.append( t.result )
3940 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003941 for i in range( len( main.activeNodes ) ):
3942 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003943 if size != sizeResponses[ i ]:
3944 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003945 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003946 " expected a size of " + str( size ) +
3947 " for set " + onosSetName +
3948 " but got " + str( sizeResponses[ i ] ) )
3949 addAllResults = addAllResults and getResults and sizeResults
3950 utilities.assert_equals( expect=main.TRUE,
3951 actual=addAllResults,
3952 onpass="Set addAll correct",
3953 onfail="Set addAll was incorrect" )
3954
3955 main.step( "Distributed Set clear()" )
3956 onosSet.clear()
3957 clearResponses = []
3958 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003959 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003960 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003961 name="setTestClear-" + str( i ),
3962 args=[ onosSetName, " " ], # value doesn't matter when clearing
3963 kwargs={ "clear": True } )
3964 threads.append( t )
3965 t.start()
3966 for t in threads:
3967 t.join()
3968 clearResponses.append( t.result )
3969
3970 # main.TRUE = successfully changed the set
3971 # main.FALSE = action resulted in no change in set
3972 # main.ERROR - Some error in executing the function
3973 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003974 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003975 if clearResponses[ i ] == main.TRUE:
3976 # All is well
3977 pass
3978 elif clearResponses[ i ] == main.FALSE:
3979 # Nothing set, probably fine
3980 pass
3981 elif clearResponses[ i ] == main.ERROR:
3982 # Error in execution
3983 clearResults = main.FALSE
3984 else:
3985 # unexpected result
3986 clearResults = main.FALSE
3987 if clearResults != main.TRUE:
3988 main.log.error( "Error executing set clear" )
3989
3990 # Check if set is still correct
3991 size = len( onosSet )
3992 getResponses = []
3993 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003994 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003995 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003996 name="setTestGet-" + str( i ),
3997 args=[ onosSetName ] )
3998 threads.append( t )
3999 t.start()
4000 for t in threads:
4001 t.join()
4002 getResponses.append( t.result )
4003 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004004 for i in range( len( main.activeNodes ) ):
4005 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004006 if isinstance( getResponses[ i ], list):
4007 current = set( getResponses[ i ] )
4008 if len( current ) == len( getResponses[ i ] ):
4009 # no repeats
4010 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004011 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004012 " has incorrect view" +
4013 " of set " + onosSetName + ":\n" +
4014 str( getResponses[ i ] ) )
4015 main.log.debug( "Expected: " + str( onosSet ) )
4016 main.log.debug( "Actual: " + str( current ) )
4017 getResults = main.FALSE
4018 else:
4019 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004020 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004021 " has repeat elements in" +
4022 " set " + onosSetName + ":\n" +
4023 str( getResponses[ i ] ) )
4024 getResults = main.FALSE
4025 elif getResponses[ i ] == main.ERROR:
4026 getResults = main.FALSE
4027 sizeResponses = []
4028 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004029 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004030 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004031 name="setTestSize-" + str( i ),
4032 args=[ onosSetName ] )
4033 threads.append( t )
4034 t.start()
4035 for t in threads:
4036 t.join()
4037 sizeResponses.append( t.result )
4038 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004039 for i in range( len( main.activeNodes ) ):
4040 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004041 if size != sizeResponses[ i ]:
4042 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004043 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004044 " expected a size of " + str( size ) +
4045 " for set " + onosSetName +
4046 " but got " + str( sizeResponses[ i ] ) )
4047 clearResults = clearResults and getResults and sizeResults
4048 utilities.assert_equals( expect=main.TRUE,
4049 actual=clearResults,
4050 onpass="Set clear correct",
4051 onfail="Set clear was incorrect" )
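
        # After clear() the expected contents are just the empty set, so the
        # generic check above amounts to the following ( illustration only; it
        # leans on the same assumptions the checks above already make, namely that
        # setTestGet returns a list and setTestSize compares equal to an integer ):
        #     clearedOk = all( main.CLIs[i].setTestGet( onosSetName ) == [] and
        #                      main.CLIs[i].setTestSize( onosSetName ) == 0
        #                      for i in main.activeNodes )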

        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # No change to the set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
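
        # Worked example of retain(): if the set held { "a", "b", "c" } and
        # retainValue were "b c", only { "b", "c" } should remain afterwards,
        # mirroring the set.intersection_update() call applied to onosSet above.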

        # Transactional maps
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
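        # The check below assumes transactionalMapPut returns one entry per key,
        # each readable as putResponses[ key ][ 'value' ]; that is the only shape
        # this step accepts, and anything else fails the put check.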
        if putResponses and len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )
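        # Presumably the sleep gives the puts from the previous step time to become
        # readable on every node before the gets start ( see the FIXME above ).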

        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )
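
        # Taken together, the put and get steps above verify the round trip: the
        # numKeys entries written through a single node ( main.activeNodes[0] )
        # should be readable with the same value, tMapValue, from every active node.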