blob: 9b0f7a7e77d113adcda29786c6a59c6ee8a34d35 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of it's nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hallf3d16e72015-12-16 17:45:08 -080053 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hallb3ed8ed2015-10-28 16:43:55 -070055 main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070056 "initialization" )
57 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070058 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070059 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
Jon Hall5cf14d52015-07-16 12:15:19 -070061
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -070086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
Jon Halle1a3b752015-07-22 13:02:46 -070097 try:
Jon Hall53c5e662016-04-13 16:06:56 -070098 from tests.HA.dependencies.HA import HA
Jon Hall41d39f12016-04-11 22:54:35 -070099 main.HA = HA()
Jon Halle1a3b752015-07-22 13:02:46 -0700100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700107 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700130 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700136 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700183 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "2"
Jon Hall5cf14d52015-07-16 12:15:19 -0700186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800189 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hall53c5e662016-04-13 16:06:56 -0700200 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
Jon Hall3b489db2015-10-05 14:38:37 -0700201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700215 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700239 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
Jon Hallc6793552016-01-19 14:18:37 -0800242 main.log.error( node.name + " hasn't started" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
Jon Hall6509dbf2016-06-21 17:01:17 -0700250 main.step( "Starting ONOS CLI sessions" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700251 cliResults = main.TRUE
252 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700255 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700256 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
Jon Hall5cf14d52015-07-16 12:15:19 -0700270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700289 for i in main.activeNodes:
290 cli = main.CLIs[i]
Jon Halla440e872016-03-31 15:15:50 -0700291 main.log.debug( "{} components not ACTIVE: \n{}".format(
292 cli.name,
293 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700294 main.log.error( "Failed to start ONOS, stopping test" )
295 main.cleanup()
296 main.exit()
297
Jon Hall172b7ba2016-04-07 18:12:20 -0700298 main.step( "Activate apps defined in the params file" )
299 # get data from the params
300 apps = main.params.get( 'apps' )
301 if apps:
302 apps = apps.split(',')
303 main.log.warn( apps )
304 activateResult = True
305 for app in apps:
306 main.CLIs[ 0 ].app( app, "Activate" )
307 # TODO: check this worked
308 time.sleep( 10 ) # wait for apps to activate
309 for app in apps:
310 state = main.CLIs[ 0 ].appStatus( app )
311 if state == "ACTIVE":
312 activateResult = activeResult and True
313 else:
314 main.log.error( "{} is in {} state".format( app, state ) )
315 activeResult = False
316 utilities.assert_equals( expect=True,
317 actual=activateResult,
318 onpass="Successfully activated apps",
319 onfail="Failed to activate apps" )
320 else:
321 main.log.warn( "No apps were specified to be loaded after startup" )
322
323 main.step( "Set ONOS configurations" )
324 config = main.params.get( 'ONOS_Configuration' )
325 if config:
326 main.log.debug( config )
327 checkResult = main.TRUE
328 for component in config:
329 for setting in config[component]:
330 value = config[component][setting]
331 check = main.CLIs[ 0 ].setCfg( component, setting, value )
332 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
333 checkResult = check and checkResult
334 utilities.assert_equals( expect=main.TRUE,
335 actual=checkResult,
336 onpass="Successfully set config",
337 onfail="Failed to set config" )
338 else:
339 main.log.warn( "No configurations were specified to be changed after startup" )
340
Jon Hall9d2dcad2016-04-08 10:15:20 -0700341 main.step( "App Ids check" )
342 appCheck = main.TRUE
343 threads = []
344 for i in main.activeNodes:
345 t = main.Thread( target=main.CLIs[i].appToIDCheck,
346 name="appToIDCheck-" + str( i ),
347 args=[] )
348 threads.append( t )
349 t.start()
350
351 for t in threads:
352 t.join()
353 appCheck = appCheck and t.result
354 if appCheck != main.TRUE:
355 node = main.activeNodes[0]
356 main.log.warn( main.CLIs[node].apps() )
357 main.log.warn( main.CLIs[node].appIDs() )
358 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
359 onpass="App Ids seem to be correct",
360 onfail="Something is wrong with app Ids" )
361
Jon Hall5cf14d52015-07-16 12:15:19 -0700362 def CASE2( self, main ):
363 """
364 Assign devices to controllers
365 """
366 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700367 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700368 assert main, "main not defined"
369 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700370 assert main.CLIs, "main.CLIs not defined"
371 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700372 assert ONOS1Port, "ONOS1Port not defined"
373 assert ONOS2Port, "ONOS2Port not defined"
374 assert ONOS3Port, "ONOS3Port not defined"
375 assert ONOS4Port, "ONOS4Port not defined"
376 assert ONOS5Port, "ONOS5Port not defined"
377 assert ONOS6Port, "ONOS6Port not defined"
378 assert ONOS7Port, "ONOS7Port not defined"
379
380 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700381 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700382 "and check that an ONOS node becomes the " +\
383 "master of the device."
384 main.step( "Assign switches to controllers" )
385
386 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700387 for i in range( main.numCtrls ):
388 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700389 swList = []
390 for i in range( 1, 29 ):
391 swList.append( "s" + str( i ) )
392 main.Mininet1.assignSwController( sw=swList, ip=ipList )
393
394 mastershipCheck = main.TRUE
395 for i in range( 1, 29 ):
396 response = main.Mininet1.getSwController( "s" + str( i ) )
397 try:
398 main.log.info( str( response ) )
399 except Exception:
400 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700401 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700402 if re.search( "tcp:" + node.ip_address, response ):
403 mastershipCheck = mastershipCheck and main.TRUE
404 else:
405 main.log.error( "Error, node " + node.ip_address + " is " +
406 "not in the list of controllers s" +
407 str( i ) + " is connecting to." )
408 mastershipCheck = main.FALSE
409 utilities.assert_equals(
410 expect=main.TRUE,
411 actual=mastershipCheck,
412 onpass="Switch mastership assigned correctly",
413 onfail="Switches not assigned correctly to controllers" )
414
    def CASE21( self, main ):
        """
        Assign mastership to controllers.

        Manually maps each of the 28 obelisk switches to a designated ONOS
        node with 'device-role', then re-reads the roles to confirm the
        assignment took effect.
        """
        import time
        # Preconditions established by CASE1
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # Use the first active node's CLI for all queries/role assignments
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster. '% main.numCtrls' wraps the target index for smaller
            # clusters.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # NOTE: getDevice() may return None; the resulting
                #       AttributeError is caught by the except below
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpids for s8-s17 are 3008..3017
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # dpids for s18-s27 are 6018..6027
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    # NOTE(review): this branch falls through with the stale
                    # deviceId/ip from the previous iteration — confirm intended
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read each device's role and compare against the intended master
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
536
537 def CASE3( self, main ):
538 """
539 Assign intents
540 """
541 import time
542 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700543 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700544 assert main, "main not defined"
545 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700546 assert main.CLIs, "main.CLIs not defined"
547 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700548 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700549 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700550 "assign predetermined host-to-host intents." +\
551 " After installation, check that the intent" +\
552 " is distributed to all nodes and the state" +\
553 " is INSTALLED"
554
555 # install onos-app-fwd
556 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700557 onosCli = main.CLIs[ main.activeNodes[0] ]
558 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700559 utilities.assert_equals( expect=main.TRUE, actual=installResults,
560 onpass="Install fwd successful",
561 onfail="Install fwd failed" )
562
563 main.step( "Check app ids" )
564 appCheck = main.TRUE
565 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700566 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700567 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700568 name="appToIDCheck-" + str( i ),
569 args=[] )
570 threads.append( t )
571 t.start()
572
573 for t in threads:
574 t.join()
575 appCheck = appCheck and t.result
576 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700577 main.log.warn( onosCli.apps() )
578 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700579 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
580 onpass="App Ids seem to be correct",
581 onfail="Something is wrong with app Ids" )
582
583 main.step( "Discovering Hosts( Via pingall for now )" )
584 # FIXME: Once we have a host discovery mechanism, use that instead
585 # REACTIVE FWD test
586 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700587 passMsg = "Reactive Pingall test passed"
588 time1 = time.time()
589 pingResult = main.Mininet1.pingall()
590 time2 = time.time()
591 if not pingResult:
592 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700593 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700594 passMsg += " on the second try"
595 utilities.assert_equals(
596 expect=main.TRUE,
597 actual=pingResult,
598 onpass= passMsg,
599 onfail="Reactive Pingall failed, " +
600 "one or more ping pairs failed" )
601 main.log.info( "Time for pingall: %2f seconds" %
602 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700603 # timeout for fwd flows
604 time.sleep( 11 )
605 # uninstall onos-app-fwd
606 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700607 node = main.activeNodes[0]
608 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700609 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
610 onpass="Uninstall fwd successful",
611 onfail="Uninstall fwd failed" )
612
613 main.step( "Check app ids" )
614 threads = []
615 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700616 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700617 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700618 name="appToIDCheck-" + str( i ),
619 args=[] )
620 threads.append( t )
621 t.start()
622
623 for t in threads:
624 t.join()
625 appCheck2 = appCheck2 and t.result
626 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700627 node = main.activeNodes[0]
628 main.log.warn( main.CLIs[node].apps() )
629 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700630 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
631 onpass="App Ids seem to be correct",
632 onfail="Something is wrong with app Ids" )
633
634 main.step( "Add host intents via cli" )
635 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700636 # TODO: move the host numbers to params
637 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700638 intentAddResult = True
639 hostResult = main.TRUE
640 for i in range( 8, 18 ):
641 main.log.info( "Adding host intent between h" + str( i ) +
642 " and h" + str( i + 10 ) )
643 host1 = "00:00:00:00:00:" + \
644 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
645 host2 = "00:00:00:00:00:" + \
646 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
647 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700648 host1Dict = onosCli.getHost( host1 )
649 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700650 host1Id = None
651 host2Id = None
652 if host1Dict and host2Dict:
653 host1Id = host1Dict.get( 'id', None )
654 host2Id = host2Dict.get( 'id', None )
655 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700656 nodeNum = ( i % len( main.activeNodes ) )
657 node = main.activeNodes[nodeNum]
658 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700659 if tmpId:
660 main.log.info( "Added intent with id: " + tmpId )
661 intentIds.append( tmpId )
662 else:
663 main.log.error( "addHostIntent returned: " +
664 repr( tmpId ) )
665 else:
666 main.log.error( "Error, getHost() failed for h" + str( i ) +
667 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700668 node = main.activeNodes[0]
669 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700670 main.log.warn( "Hosts output: " )
671 try:
672 main.log.warn( json.dumps( json.loads( hosts ),
673 sort_keys=True,
674 indent=4,
675 separators=( ',', ': ' ) ) )
676 except ( ValueError, TypeError ):
677 main.log.warn( repr( hosts ) )
678 hostResult = main.FALSE
679 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
680 onpass="Found a host id for each host",
681 onfail="Error looking up host ids" )
682
683 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700684 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700685 main.log.info( "Submitted intents: " + str( intentIds ) )
686 main.log.info( "Intents in ONOS: " + str( onosIds ) )
687 for intent in intentIds:
688 if intent in onosIds:
689 pass # intent submitted is in onos
690 else:
691 intentAddResult = False
692 if intentAddResult:
693 intentStop = time.time()
694 else:
695 intentStop = None
696 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700697 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700698 intentStates = []
699 installedCheck = True
700 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
701 count = 0
702 try:
703 for intent in json.loads( intents ):
704 state = intent.get( 'state', None )
705 if "INSTALLED" not in state:
706 installedCheck = False
707 intentId = intent.get( 'id', None )
708 intentStates.append( ( intentId, state ) )
709 except ( ValueError, TypeError ):
710 main.log.exception( "Error parsing intents" )
711 # add submitted intents not in the store
712 tmplist = [ i for i, s in intentStates ]
713 missingIntents = False
714 for i in intentIds:
715 if i not in tmplist:
716 intentStates.append( ( i, " - " ) )
717 missingIntents = True
718 intentStates.sort()
719 for i, s in intentStates:
720 count += 1
721 main.log.info( "%-6s%-15s%-15s" %
722 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700723 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700724 try:
725 missing = False
726 if leaders:
727 parsedLeaders = json.loads( leaders )
728 main.log.warn( json.dumps( parsedLeaders,
729 sort_keys=True,
730 indent=4,
731 separators=( ',', ': ' ) ) )
732 # check for all intent partitions
733 topics = []
734 for i in range( 14 ):
735 topics.append( "intent-partition-" + str( i ) )
736 main.log.debug( topics )
737 ONOStopics = [ j['topic'] for j in parsedLeaders ]
738 for topic in topics:
739 if topic not in ONOStopics:
740 main.log.error( "Error: " + topic +
741 " not in leaders" )
742 missing = True
743 else:
744 main.log.error( "leaders() returned None" )
745 except ( ValueError, TypeError ):
746 main.log.exception( "Error parsing leaders" )
747 main.log.error( repr( leaders ) )
748 # Check all nodes
749 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700750 for i in main.activeNodes:
751 response = main.CLIs[i].leaders( jsonFormat=False)
752 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700753 str( response ) )
754
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700755 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700756 try:
757 if partitions :
758 parsedPartitions = json.loads( partitions )
759 main.log.warn( json.dumps( parsedPartitions,
760 sort_keys=True,
761 indent=4,
762 separators=( ',', ': ' ) ) )
763 # TODO check for a leader in all paritions
764 # TODO check for consistency among nodes
765 else:
766 main.log.error( "partitions() returned None" )
767 except ( ValueError, TypeError ):
768 main.log.exception( "Error parsing partitions" )
769 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700770 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700771 try:
772 if pendingMap :
773 parsedPending = json.loads( pendingMap )
774 main.log.warn( json.dumps( parsedPending,
775 sort_keys=True,
776 indent=4,
777 separators=( ',', ': ' ) ) )
778 # TODO check something here?
779 else:
780 main.log.error( "pendingMap() returned None" )
781 except ( ValueError, TypeError ):
782 main.log.exception( "Error parsing pending map" )
783 main.log.error( repr( pendingMap ) )
784
785 intentAddResult = bool( intentAddResult and not missingIntents and
786 installedCheck )
787 if not intentAddResult:
788 main.log.error( "Error in pushing host intents to ONOS" )
789
790 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700791 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700792 correct = True
793 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700794 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700795 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700796 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700797 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700798 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700799 str( sorted( onosIds ) ) )
800 if sorted( ids ) != sorted( intentIds ):
801 main.log.warn( "Set of intent IDs doesn't match" )
802 correct = False
803 break
804 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700805 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700806 for intent in intents:
807 if intent[ 'state' ] != "INSTALLED":
808 main.log.warn( "Intent " + intent[ 'id' ] +
809 " is " + intent[ 'state' ] )
810 correct = False
811 break
812 if correct:
813 break
814 else:
815 time.sleep(1)
816 if not intentStop:
817 intentStop = time.time()
818 global gossipTime
819 gossipTime = intentStop - intentStart
820 main.log.info( "It took about " + str( gossipTime ) +
821 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700822 gossipPeriod = int( main.params['timers']['gossip'] )
823 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700824 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700825 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700826 onpass="ECM anti-entropy for intents worked within " +
827 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700828 onfail="Intent ECM anti-entropy took too long. " +
829 "Expected time:{}, Actual time:{}".format( maxGossipTime,
830 gossipTime ) )
831 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700832 intentAddResult = True
833
834 if not intentAddResult or "key" in pendingMap:
835 import time
836 installedCheck = True
837 main.log.info( "Sleeping 60 seconds to see if intents are found" )
838 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700839 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700840 main.log.info( "Submitted intents: " + str( intentIds ) )
841 main.log.info( "Intents in ONOS: " + str( onosIds ) )
842 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700843 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700844 intentStates = []
845 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
846 count = 0
847 try:
848 for intent in json.loads( intents ):
849 # Iter through intents of a node
850 state = intent.get( 'state', None )
851 if "INSTALLED" not in state:
852 installedCheck = False
853 intentId = intent.get( 'id', None )
854 intentStates.append( ( intentId, state ) )
855 except ( ValueError, TypeError ):
856 main.log.exception( "Error parsing intents" )
857 # add submitted intents not in the store
858 tmplist = [ i for i, s in intentStates ]
859 for i in intentIds:
860 if i not in tmplist:
861 intentStates.append( ( i, " - " ) )
862 intentStates.sort()
863 for i, s in intentStates:
864 count += 1
865 main.log.info( "%-6s%-15s%-15s" %
866 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700867 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700868 try:
869 missing = False
870 if leaders:
871 parsedLeaders = json.loads( leaders )
872 main.log.warn( json.dumps( parsedLeaders,
873 sort_keys=True,
874 indent=4,
875 separators=( ',', ': ' ) ) )
876 # check for all intent partitions
877 # check for election
878 topics = []
879 for i in range( 14 ):
880 topics.append( "intent-partition-" + str( i ) )
881 # FIXME: this should only be after we start the app
882 topics.append( "org.onosproject.election" )
883 main.log.debug( topics )
884 ONOStopics = [ j['topic'] for j in parsedLeaders ]
885 for topic in topics:
886 if topic not in ONOStopics:
887 main.log.error( "Error: " + topic +
888 " not in leaders" )
889 missing = True
890 else:
891 main.log.error( "leaders() returned None" )
892 except ( ValueError, TypeError ):
893 main.log.exception( "Error parsing leaders" )
894 main.log.error( repr( leaders ) )
895 # Check all nodes
896 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700897 for i in main.activeNodes:
898 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700899 response = node.leaders( jsonFormat=False)
900 main.log.warn( str( node.name ) + " leaders output: \n" +
901 str( response ) )
902
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700903 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700904 try:
905 if partitions :
906 parsedPartitions = json.loads( partitions )
907 main.log.warn( json.dumps( parsedPartitions,
908 sort_keys=True,
909 indent=4,
910 separators=( ',', ': ' ) ) )
911 # TODO check for a leader in all paritions
912 # TODO check for consistency among nodes
913 else:
914 main.log.error( "partitions() returned None" )
915 except ( ValueError, TypeError ):
916 main.log.exception( "Error parsing partitions" )
917 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700918 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700919 try:
920 if pendingMap :
921 parsedPending = json.loads( pendingMap )
922 main.log.warn( json.dumps( parsedPending,
923 sort_keys=True,
924 indent=4,
925 separators=( ',', ': ' ) ) )
926 # TODO check something here?
927 else:
928 main.log.error( "pendingMap() returned None" )
929 except ( ValueError, TypeError ):
930 main.log.exception( "Error parsing pending map" )
931 main.log.error( repr( pendingMap ) )
932
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Polls the ONOS CLI (up to 40 tries, 1 s apart) until every intent
        reports an INSTALLED state, then pings between the host pairs
        h<i> <-> h<i+10> for i in 8..17 to verify data-plane connectivity.
        Along the way it dumps leadership, partition and pending-map state
        from one active ONOS node for debugging. If the intents were not
        all installed on the first pass, it waits 60 seconds, re-dumps the
        state, and repeats the ping sweep once more.

        Reads from main: numCtrls, CLIs, nodes, activeNodes, Mininet1,
        TRUE/FALSE result constants, and the log/step/case helpers.
        """
        import json
        import time
        # Sanity-check that the test environment was initialized (CASE1).
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"

        # Use the first active node's CLI for all single-node queries below.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until all intents are INSTALLED, or give up after 40 tries
        # (roughly 40 seconds including the 1 s sleeps).
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # intents() returned non-JSON (e.g. an error string);
                # log it and fall through with whatever was collected.
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairing convention from the topology: h8<->h18, h9<->h19, ...
        # h17<->h27, matching the host intents added earlier in the test.
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intents to help diagnose why connectivity failed.
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # NOTE: 14 intent partitions is hard-coded here; it must
                # match the partition count configured in ONOS.
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Dump each active node's (human-readable) leaders view for
            # cross-node comparison in the logs.
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Retry path: if some intents never reached INSTALLED above, give
        # the cluster another minute, re-dump all state, and ping again.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # A topic had no leader; dump every node's view for triage.
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            main.log.debug( onosCli.flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1213
1214 def CASE5( self, main ):
1215 """
1216 Reading state of ONOS
1217 """
1218 import json
1219 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001220 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001221 assert main, "main not defined"
1222 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001223 assert main.CLIs, "main.CLIs not defined"
1224 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001225
1226 main.case( "Setting up and gathering data for current state" )
1227 # The general idea for this test case is to pull the state of
1228 # ( intents,flows, topology,... ) from each ONOS node
1229 # We can then compare them with each other and also with past states
1230
1231 main.step( "Check that each switch has a master" )
1232 global mastershipState
1233 mastershipState = '[]'
1234
1235 # Assert that each device has a master
1236 rolesNotNull = main.TRUE
1237 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001238 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001239 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001240 name="rolesNotNull-" + str( i ),
1241 args=[] )
1242 threads.append( t )
1243 t.start()
1244
1245 for t in threads:
1246 t.join()
1247 rolesNotNull = rolesNotNull and t.result
1248 utilities.assert_equals(
1249 expect=main.TRUE,
1250 actual=rolesNotNull,
1251 onpass="Each device has a master",
1252 onfail="Some devices don't have a master assigned" )
1253
1254 main.step( "Get the Mastership of each switch from each controller" )
1255 ONOSMastership = []
1256 mastershipCheck = main.FALSE
1257 consistentMastership = True
1258 rolesResults = True
1259 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001260 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001261 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001262 name="roles-" + str( i ),
1263 args=[] )
1264 threads.append( t )
1265 t.start()
1266
1267 for t in threads:
1268 t.join()
1269 ONOSMastership.append( t.result )
1270
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001271 for i in range( len( ONOSMastership ) ):
1272 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001273 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001274 main.log.error( "Error in getting ONOS" + node + " roles" )
1275 main.log.warn( "ONOS" + node + " mastership response: " +
1276 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001277 rolesResults = False
1278 utilities.assert_equals(
1279 expect=True,
1280 actual=rolesResults,
1281 onpass="No error in reading roles output",
1282 onfail="Error in reading roles from ONOS" )
1283
1284 main.step( "Check for consistency in roles from each controller" )
1285 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1286 main.log.info(
1287 "Switch roles are consistent across all ONOS nodes" )
1288 else:
1289 consistentMastership = False
1290 utilities.assert_equals(
1291 expect=True,
1292 actual=consistentMastership,
1293 onpass="Switch roles are consistent across all ONOS nodes",
1294 onfail="ONOS nodes have different views of switch roles" )
1295
1296 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001297 for i in range( len( main.activeNodes ) ):
1298 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001299 try:
1300 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001301 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001302 json.dumps(
1303 json.loads( ONOSMastership[ i ] ),
1304 sort_keys=True,
1305 indent=4,
1306 separators=( ',', ': ' ) ) )
1307 except ( ValueError, TypeError ):
1308 main.log.warn( repr( ONOSMastership[ i ] ) )
1309 elif rolesResults and consistentMastership:
1310 mastershipCheck = main.TRUE
1311 mastershipState = ONOSMastership[ 0 ]
1312
1313 main.step( "Get the intents from each controller" )
1314 global intentState
1315 intentState = []
1316 ONOSIntents = []
1317 intentCheck = main.FALSE
1318 consistentIntents = True
1319 intentsResults = True
1320 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001321 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001322 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001323 name="intents-" + str( i ),
1324 args=[],
1325 kwargs={ 'jsonFormat': True } )
1326 threads.append( t )
1327 t.start()
1328
1329 for t in threads:
1330 t.join()
1331 ONOSIntents.append( t.result )
1332
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001333 for i in range( len( ONOSIntents ) ):
1334 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001335 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001336 main.log.error( "Error in getting ONOS" + node + " intents" )
1337 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001338 repr( ONOSIntents[ i ] ) )
1339 intentsResults = False
1340 utilities.assert_equals(
1341 expect=True,
1342 actual=intentsResults,
1343 onpass="No error in reading intents output",
1344 onfail="Error in reading intents from ONOS" )
1345
1346 main.step( "Check for consistency in Intents from each controller" )
1347 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1348 main.log.info( "Intents are consistent across all ONOS " +
1349 "nodes" )
1350 else:
1351 consistentIntents = False
1352 main.log.error( "Intents not consistent" )
1353 utilities.assert_equals(
1354 expect=True,
1355 actual=consistentIntents,
1356 onpass="Intents are consistent across all ONOS nodes",
1357 onfail="ONOS nodes have different views of intents" )
1358
1359 if intentsResults:
1360 # Try to make it easy to figure out what is happening
1361 #
1362 # Intent ONOS1 ONOS2 ...
1363 # 0x01 INSTALLED INSTALLING
1364 # ... ... ...
1365 # ... ... ...
1366 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001367 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001368 title += " " * 10 + "ONOS" + str( n + 1 )
1369 main.log.warn( title )
1370 # get all intent keys in the cluster
1371 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001372 try:
1373 # Get the set of all intent keys
Jon Hall5cf14d52015-07-16 12:15:19 -07001374 for nodeStr in ONOSIntents:
1375 node = json.loads( nodeStr )
1376 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001377 keys.append( intent.get( 'id' ) )
1378 keys = set( keys )
1379 # For each intent key, print the state on each node
1380 for key in keys:
1381 row = "%-13s" % key
1382 for nodeStr in ONOSIntents:
1383 node = json.loads( nodeStr )
1384 for intent in node:
1385 if intent.get( 'id', "Error" ) == key:
1386 row += "%-15s" % intent.get( 'state' )
1387 main.log.warn( row )
1388 # End of intent state table
1389 except ValueError as e:
1390 main.log.exception( e )
1391 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001392
1393 if intentsResults and not consistentIntents:
1394 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001395 n = str( main.activeNodes[-1] + 1 )
1396 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001397 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1398 sort_keys=True,
1399 indent=4,
1400 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001401 for i in range( len( ONOSIntents ) ):
1402 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001403 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001404 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001405 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1406 sort_keys=True,
1407 indent=4,
1408 separators=( ',', ': ' ) ) )
1409 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001410 main.log.debug( "ONOS" + node + " intents match ONOS" +
1411 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001412 elif intentsResults and consistentIntents:
1413 intentCheck = main.TRUE
1414 intentState = ONOSIntents[ 0 ]
1415
1416 main.step( "Get the flows from each controller" )
1417 global flowState
1418 flowState = []
1419 ONOSFlows = []
1420 ONOSFlowsJson = []
1421 flowCheck = main.FALSE
1422 consistentFlows = True
1423 flowsResults = True
1424 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001425 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001426 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001427 name="flows-" + str( i ),
1428 args=[],
1429 kwargs={ 'jsonFormat': True } )
1430 threads.append( t )
1431 t.start()
1432
1433 # NOTE: Flows command can take some time to run
1434 time.sleep(30)
1435 for t in threads:
1436 t.join()
1437 result = t.result
1438 ONOSFlows.append( result )
1439
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001440 for i in range( len( ONOSFlows ) ):
1441 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001442 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1443 main.log.error( "Error in getting ONOS" + num + " flows" )
1444 main.log.warn( "ONOS" + num + " flows response: " +
1445 repr( ONOSFlows[ i ] ) )
1446 flowsResults = False
1447 ONOSFlowsJson.append( None )
1448 else:
1449 try:
1450 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1451 except ( ValueError, TypeError ):
1452 # FIXME: change this to log.error?
1453 main.log.exception( "Error in parsing ONOS" + num +
1454 " response as json." )
1455 main.log.error( repr( ONOSFlows[ i ] ) )
1456 ONOSFlowsJson.append( None )
1457 flowsResults = False
1458 utilities.assert_equals(
1459 expect=True,
1460 actual=flowsResults,
1461 onpass="No error in reading flows output",
1462 onfail="Error in reading flows from ONOS" )
1463
1464 main.step( "Check for consistency in Flows from each controller" )
1465 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1466 if all( tmp ):
1467 main.log.info( "Flow count is consistent across all ONOS nodes" )
1468 else:
1469 consistentFlows = False
1470 utilities.assert_equals(
1471 expect=True,
1472 actual=consistentFlows,
1473 onpass="The flow count is consistent across all ONOS nodes",
1474 onfail="ONOS nodes have different flow counts" )
1475
1476 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001477 for i in range( len( ONOSFlows ) ):
1478 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001479 try:
1480 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001481 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001482 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1483 indent=4, separators=( ',', ': ' ) ) )
1484 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001485 main.log.warn( "ONOS" + node + " flows: " +
1486 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001487 elif flowsResults and consistentFlows:
1488 flowCheck = main.TRUE
1489 flowState = ONOSFlows[ 0 ]
1490
1491 main.step( "Get the OF Table entries" )
1492 global flows
1493 flows = []
1494 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001495 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001496 if flowCheck == main.FALSE:
1497 for table in flows:
1498 main.log.warn( table )
1499 # TODO: Compare switch flow tables with ONOS flow tables
1500
1501 main.step( "Start continuous pings" )
1502 main.Mininet2.pingLong(
1503 src=main.params[ 'PING' ][ 'source1' ],
1504 target=main.params[ 'PING' ][ 'target1' ],
1505 pingTime=500 )
1506 main.Mininet2.pingLong(
1507 src=main.params[ 'PING' ][ 'source2' ],
1508 target=main.params[ 'PING' ][ 'target2' ],
1509 pingTime=500 )
1510 main.Mininet2.pingLong(
1511 src=main.params[ 'PING' ][ 'source3' ],
1512 target=main.params[ 'PING' ][ 'target3' ],
1513 pingTime=500 )
1514 main.Mininet2.pingLong(
1515 src=main.params[ 'PING' ][ 'source4' ],
1516 target=main.params[ 'PING' ][ 'target4' ],
1517 pingTime=500 )
1518 main.Mininet2.pingLong(
1519 src=main.params[ 'PING' ][ 'source5' ],
1520 target=main.params[ 'PING' ][ 'target5' ],
1521 pingTime=500 )
1522 main.Mininet2.pingLong(
1523 src=main.params[ 'PING' ][ 'source6' ],
1524 target=main.params[ 'PING' ][ 'target6' ],
1525 pingTime=500 )
1526 main.Mininet2.pingLong(
1527 src=main.params[ 'PING' ][ 'source7' ],
1528 target=main.params[ 'PING' ][ 'target7' ],
1529 pingTime=500 )
1530 main.Mininet2.pingLong(
1531 src=main.params[ 'PING' ][ 'source8' ],
1532 target=main.params[ 'PING' ][ 'target8' ],
1533 pingTime=500 )
1534 main.Mininet2.pingLong(
1535 src=main.params[ 'PING' ][ 'source9' ],
1536 target=main.params[ 'PING' ][ 'target9' ],
1537 pingTime=500 )
1538 main.Mininet2.pingLong(
1539 src=main.params[ 'PING' ][ 'source10' ],
1540 target=main.params[ 'PING' ][ 'target10' ],
1541 pingTime=500 )
1542
1543 main.step( "Collecting topology information from ONOS" )
1544 devices = []
1545 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001546 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001547 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001548 name="devices-" + str( i ),
1549 args=[ ] )
1550 threads.append( t )
1551 t.start()
1552
1553 for t in threads:
1554 t.join()
1555 devices.append( t.result )
1556 hosts = []
1557 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001558 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001559 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001560 name="hosts-" + str( i ),
1561 args=[ ] )
1562 threads.append( t )
1563 t.start()
1564
1565 for t in threads:
1566 t.join()
1567 try:
1568 hosts.append( json.loads( t.result ) )
1569 except ( ValueError, TypeError ):
1570 # FIXME: better handling of this, print which node
1571 # Maybe use thread name?
1572 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001573 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001574 hosts.append( None )
1575
1576 ports = []
1577 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001578 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001579 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001580 name="ports-" + str( i ),
1581 args=[ ] )
1582 threads.append( t )
1583 t.start()
1584
1585 for t in threads:
1586 t.join()
1587 ports.append( t.result )
1588 links = []
1589 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001590 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001591 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001592 name="links-" + str( i ),
1593 args=[ ] )
1594 threads.append( t )
1595 t.start()
1596
1597 for t in threads:
1598 t.join()
1599 links.append( t.result )
1600 clusters = []
1601 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001602 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001603 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001604 name="clusters-" + str( i ),
1605 args=[ ] )
1606 threads.append( t )
1607 t.start()
1608
1609 for t in threads:
1610 t.join()
1611 clusters.append( t.result )
1612 # Compare json objects for hosts and dataplane clusters
1613
1614 # hosts
1615 main.step( "Host view is consistent across ONOS nodes" )
1616 consistentHostsResult = main.TRUE
1617 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001618 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001619 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001620 if hosts[ controller ] == hosts[ 0 ]:
1621 continue
1622 else: # hosts not consistent
1623 main.log.error( "hosts from ONOS" +
1624 controllerStr +
1625 " is inconsistent with ONOS1" )
1626 main.log.warn( repr( hosts[ controller ] ) )
1627 consistentHostsResult = main.FALSE
1628
1629 else:
1630 main.log.error( "Error in getting ONOS hosts from ONOS" +
1631 controllerStr )
1632 consistentHostsResult = main.FALSE
1633 main.log.warn( "ONOS" + controllerStr +
1634 " hosts response: " +
1635 repr( hosts[ controller ] ) )
1636 utilities.assert_equals(
1637 expect=main.TRUE,
1638 actual=consistentHostsResult,
1639 onpass="Hosts view is consistent across all ONOS nodes",
1640 onfail="ONOS nodes have different views of hosts" )
1641
1642 main.step( "Each host has an IP address" )
1643 ipResult = main.TRUE
1644 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001645 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001646 if hosts[ controller ]:
1647 for host in hosts[ controller ]:
1648 if not host.get( 'ipAddresses', [ ] ):
1649 main.log.error( "Error with host ips on controller" +
1650 controllerStr + ": " + str( host ) )
1651 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001652 utilities.assert_equals(
1653 expect=main.TRUE,
1654 actual=ipResult,
1655 onpass="The ips of the hosts aren't empty",
1656 onfail="The ip of at least one host is missing" )
1657
1658 # Strongly connected clusters of devices
1659 main.step( "Cluster view is consistent across ONOS nodes" )
1660 consistentClustersResult = main.TRUE
1661 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001662 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001663 if "Error" not in clusters[ controller ]:
1664 if clusters[ controller ] == clusters[ 0 ]:
1665 continue
1666 else: # clusters not consistent
1667 main.log.error( "clusters from ONOS" + controllerStr +
1668 " is inconsistent with ONOS1" )
1669 consistentClustersResult = main.FALSE
1670
1671 else:
1672 main.log.error( "Error in getting dataplane clusters " +
1673 "from ONOS" + controllerStr )
1674 consistentClustersResult = main.FALSE
1675 main.log.warn( "ONOS" + controllerStr +
1676 " clusters response: " +
1677 repr( clusters[ controller ] ) )
1678 utilities.assert_equals(
1679 expect=main.TRUE,
1680 actual=consistentClustersResult,
1681 onpass="Clusters view is consistent across all ONOS nodes",
1682 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001683 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001684 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001685
Jon Hall5cf14d52015-07-16 12:15:19 -07001686 # there should always only be one cluster
1687 main.step( "Cluster view correct across ONOS nodes" )
1688 try:
1689 numClusters = len( json.loads( clusters[ 0 ] ) )
1690 except ( ValueError, TypeError ):
1691 main.log.exception( "Error parsing clusters[0]: " +
1692 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001693 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001694 clusterResults = main.FALSE
1695 if numClusters == 1:
1696 clusterResults = main.TRUE
1697 utilities.assert_equals(
1698 expect=1,
1699 actual=numClusters,
1700 onpass="ONOS shows 1 SCC",
1701 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1702
1703 main.step( "Comparing ONOS topology to MN" )
1704 devicesResults = main.TRUE
1705 linksResults = main.TRUE
1706 hostsResults = main.TRUE
1707 mnSwitches = main.Mininet1.getSwitches()
1708 mnLinks = main.Mininet1.getLinks()
1709 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001710 for controller in main.activeNodes:
1711 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001712 if devices[ controller ] and ports[ controller ] and\
1713 "Error" not in devices[ controller ] and\
1714 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001715 currentDevicesResult = main.Mininet1.compareSwitches(
1716 mnSwitches,
1717 json.loads( devices[ controller ] ),
1718 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001719 else:
1720 currentDevicesResult = main.FALSE
1721 utilities.assert_equals( expect=main.TRUE,
1722 actual=currentDevicesResult,
1723 onpass="ONOS" + controllerStr +
1724 " Switches view is correct",
1725 onfail="ONOS" + controllerStr +
1726 " Switches view is incorrect" )
1727 if links[ controller ] and "Error" not in links[ controller ]:
1728 currentLinksResult = main.Mininet1.compareLinks(
1729 mnSwitches, mnLinks,
1730 json.loads( links[ controller ] ) )
1731 else:
1732 currentLinksResult = main.FALSE
1733 utilities.assert_equals( expect=main.TRUE,
1734 actual=currentLinksResult,
1735 onpass="ONOS" + controllerStr +
1736 " links view is correct",
1737 onfail="ONOS" + controllerStr +
1738 " links view is incorrect" )
1739
Jon Hall657cdf62015-12-17 14:40:51 -08001740 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001741 currentHostsResult = main.Mininet1.compareHosts(
1742 mnHosts,
1743 hosts[ controller ] )
1744 else:
1745 currentHostsResult = main.FALSE
1746 utilities.assert_equals( expect=main.TRUE,
1747 actual=currentHostsResult,
1748 onpass="ONOS" + controllerStr +
1749 " hosts exist in Mininet",
1750 onfail="ONOS" + controllerStr +
1751 " hosts don't match Mininet" )
1752
1753 devicesResults = devicesResults and currentDevicesResult
1754 linksResults = linksResults and currentLinksResult
1755 hostsResults = hostsResults and currentHostsResult
1756
1757 main.step( "Device information is correct" )
1758 utilities.assert_equals(
1759 expect=main.TRUE,
1760 actual=devicesResults,
1761 onpass="Device information is correct",
1762 onfail="Device information is incorrect" )
1763
1764 main.step( "Links are correct" )
1765 utilities.assert_equals(
1766 expect=main.TRUE,
1767 actual=linksResults,
1768 onpass="Link are correct",
1769 onfail="Links are incorrect" )
1770
1771 main.step( "Hosts are correct" )
1772 utilities.assert_equals(
1773 expect=main.TRUE,
1774 actual=hostsResults,
1775 onpass="Hosts are correct",
1776 onfail="Hosts are incorrect" )
1777
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001778 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001779 """
1780 The Failure case.
1781 """
Jon Halle1a3b752015-07-22 13:02:46 -07001782 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001783 assert main, "main not defined"
1784 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001785 assert main.CLIs, "main.CLIs not defined"
1786 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001787 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001788
1789 main.step( "Checking ONOS Logs for errors" )
1790 for node in main.nodes:
1791 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1792 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1793
Jon Hall3b489db2015-10-05 14:38:37 -07001794 n = len( main.nodes ) # Number of nodes
1795 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1796 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1797 if n > 3:
1798 main.kill.append( p - 1 )
1799 # NOTE: This only works for cluster sizes of 3,5, or 7.
1800
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001801 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001802 killResults = main.TRUE
1803 for i in main.kill:
1804 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001805 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1806 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001807 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001808 onpass="ONOS nodes stopped successfully",
1809 onfail="ONOS nodes NOT successfully stopped" )
1810
Jon Halld2871c22016-07-26 11:01:14 -07001811 main.step( "Checking ONOS nodes" )
1812 nodeResults = utilities.retry( main.HA.nodesCheck,
1813 False,
1814 args=[main.activeNodes],
1815 sleep=15,
1816 attempts=5 )
1817
1818 utilities.assert_equals( expect=True, actual=nodeResults,
1819 onpass="Nodes check successful",
1820 onfail="Nodes check NOT successful" )
1821
1822 if not nodeResults:
1823 for i in main.activeNodes:
1824 cli = main.CLIs[i]
1825 main.log.debug( "{} components not ACTIVE: \n{}".format(
1826 cli.name,
1827 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1828 main.log.error( "Failed to start ONOS, stopping test" )
1829 main.cleanup()
1830 main.exit()
1831
    def CASE62( self, main ):
        """
        Bring the stopped ONOS nodes (recorded in main.kill by CASE61)
        back up and verify they rejoin the cluster.

        Side effects: re-adds the restarted node indices to
        main.activeNodes, stores the elapsed restart time in
        main.restartTime, and re-registers the restarted nodes for the
        leadership-election app.
        """
        import time
        # Sanity-check that earlier cases set up the required state.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert main.kill, "main.kill not defined"
        main.case( "Restart minority of ONOS nodes" )

        main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
        startResults = main.TRUE
        restartTime = time.time()  # start of the restart stopwatch
        for i in main.kill:
            startResults = startResults and\
                           main.ONOSbench.onosStart( main.nodes[i].ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                 onpass="ONOS nodes started successfully",
                                 onfail="ONOS nodes NOT successfully started" )

        main.step( "Checking if ONOS is up yet" )
        # Poll isup() on every restarted node, up to 10 rounds.
        count = 0
        onosIsupResult = main.FALSE
        while onosIsupResult == main.FALSE and count < 10:
            onosIsupResult = main.TRUE
            for i in main.kill:
                onosIsupResult = onosIsupResult and\
                                 main.ONOSbench.isup( main.nodes[i].ip_address )
            count = count + 1
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS restarted successfully",
                                 onfail="ONOS restart NOT successful" )

        main.step( "Restarting ONOS main.CLIs" )
        cliResults = main.TRUE
        for i in main.kill:
            cliResults = cliResults and\
                         main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
            # Node is back: return it to the active-node bookkeeping.
            main.activeNodes.append( i )
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli restarted",
                                 onfail="ONOS cli did not restart" )
        main.activeNodes.sort()
        try:
            # Duplicates here would mean CASE61/CASE62 ran out of order.
            assert list( set( main.activeNodes ) ) == main.activeNodes,\
                "List of active nodes has duplicates, this likely indicates something was run out of order"
        except AssertionError:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

        # Grab the time of restart so we can check how long the gossip
        # protocol has had time to work
        main.restartTime = time.time() - restartTime
        main.log.debug( "Restart time: " + str( main.restartTime ) )

        main.step( "Checking ONOS nodes" )
        # Poll until the whole (restored) cluster reports healthy.
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       sleep=15,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump non-ACTIVE karaf components of each node before aborting.
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()
        # Log cluster membership/leadership views from one active node
        # for post-mortem debugging.
        node = main.activeNodes[0]
        main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
        main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
        main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )

        main.step( "Rerun for election on the node(s) that were killed" )
        runResults = main.TRUE
        for i in main.kill:
            runResults = runResults and\
                         main.CLIs[i].electionTestRun()
        utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                 onpass="ONOS nodes reran for election topic",
                                 onfail="Errror rerunning for election" )
1924
    def CASE7( self, main ):
        """
        Check ONOS state after the node failure/recovery.

        Verifies, across all nodes in main.activeNodes: every device has a
        master, mastership and intents are consistent between nodes,
        intents match the pre-failure snapshot (global intentState saved by
        an earlier case), switch flow tables match the pre-failure global
        ``flows`` snapshot, and the leadership-election app still has a
        single, live leader.
        """
        import json
        # Sanity-check that earlier cases set up the test environment.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            # main.kill is set by CASE61; default to "nothing killed" so this
            # case can also run standalone.
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel via one thread per node.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles query came back empty or with an error.
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes are compared against the first node's view.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's full roles JSON for debugging.
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents query came back empty or with an error.
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted JSON strings so ordering differences don't matter.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one column per node's state for it.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (e.g. INSTALLED counts).
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump every node's full intents JSON for debugging.
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            # intentState is a global saved by an earlier case; NameError
            # here means no pre-failure snapshot exists to compare against.
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not byte-equal: fall back to a
                # membership check of each old intent in the new set.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Log both snapshots pretty-printed for comparison.
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before failure",
                onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # ``flows`` is the module-level pre-failure flow-table snapshot
        # captured for switches s1-s28 in an earlier case.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the long-running background pings started earlier.
        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were stopped; the current leader must not
        # be one of them.
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must agree on exactly one leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
        # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2247
2248 def CASE8( self, main ):
2249 """
2250 Compare topo
2251 """
2252 import json
2253 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002254 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002255 assert main, "main not defined"
2256 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002257 assert main.CLIs, "main.CLIs not defined"
2258 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002259
2260 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002261 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002262 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002263 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002264 topoFailMsg = "ONOS topology don't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002265 elapsed = 0
2266 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002267 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002268 startTime = time.time()
2269 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002270 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002271 devicesResults = main.TRUE
2272 linksResults = main.TRUE
2273 hostsResults = main.TRUE
2274 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002275 count += 1
2276 cliStart = time.time()
2277 devices = []
2278 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002279 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002280 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002281 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002282 args=[ main.CLIs[i].devices, [ None ] ],
2283 kwargs= { 'sleep': 5, 'attempts': 5,
2284 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002285 threads.append( t )
2286 t.start()
2287
2288 for t in threads:
2289 t.join()
2290 devices.append( t.result )
2291 hosts = []
2292 ipResult = main.TRUE
2293 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002294 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002295 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002296 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002297 args=[ main.CLIs[i].hosts, [ None ] ],
2298 kwargs= { 'sleep': 5, 'attempts': 5,
2299 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002300 threads.append( t )
2301 t.start()
2302
2303 for t in threads:
2304 t.join()
2305 try:
2306 hosts.append( json.loads( t.result ) )
2307 except ( ValueError, TypeError ):
2308 main.log.exception( "Error parsing hosts results" )
2309 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002310 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002311 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002312 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002313 if hosts[ controller ]:
2314 for host in hosts[ controller ]:
2315 if host is None or host.get( 'ipAddresses', [] ) == []:
2316 main.log.error(
2317 "Error with host ipAddresses on controller" +
2318 controllerStr + ": " + str( host ) )
2319 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002320 ports = []
2321 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002322 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002323 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002324 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002325 args=[ main.CLIs[i].ports, [ None ] ],
2326 kwargs= { 'sleep': 5, 'attempts': 5,
2327 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002328 threads.append( t )
2329 t.start()
2330
2331 for t in threads:
2332 t.join()
2333 ports.append( t.result )
2334 links = []
2335 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002336 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002337 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002338 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002339 args=[ main.CLIs[i].links, [ None ] ],
2340 kwargs= { 'sleep': 5, 'attempts': 5,
2341 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002342 threads.append( t )
2343 t.start()
2344
2345 for t in threads:
2346 t.join()
2347 links.append( t.result )
2348 clusters = []
2349 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002350 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002351 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002352 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002353 args=[ main.CLIs[i].clusters, [ None ] ],
2354 kwargs= { 'sleep': 5, 'attempts': 5,
2355 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002356 threads.append( t )
2357 t.start()
2358
2359 for t in threads:
2360 t.join()
2361 clusters.append( t.result )
2362
2363 elapsed = time.time() - startTime
2364 cliTime = time.time() - cliStart
2365 print "Elapsed time: " + str( elapsed )
2366 print "CLI time: " + str( cliTime )
2367
Jon Hall6e709752016-02-01 13:38:46 -08002368 if all( e is None for e in devices ) and\
2369 all( e is None for e in hosts ) and\
2370 all( e is None for e in ports ) and\
2371 all( e is None for e in links ) and\
2372 all( e is None for e in clusters ):
2373 topoFailMsg = "Could not get topology from ONOS"
2374 main.log.error( topoFailMsg )
2375 continue # Try again, No use trying to compare
2376
Jon Hall5cf14d52015-07-16 12:15:19 -07002377 mnSwitches = main.Mininet1.getSwitches()
2378 mnLinks = main.Mininet1.getLinks()
2379 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002380 for controller in range( len( main.activeNodes ) ):
2381 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002382 if devices[ controller ] and ports[ controller ] and\
2383 "Error" not in devices[ controller ] and\
2384 "Error" not in ports[ controller ]:
2385
Jon Hallc6793552016-01-19 14:18:37 -08002386 try:
2387 currentDevicesResult = main.Mininet1.compareSwitches(
2388 mnSwitches,
2389 json.loads( devices[ controller ] ),
2390 json.loads( ports[ controller ] ) )
2391 except ( TypeError, ValueError ) as e:
2392 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2393 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002394 else:
2395 currentDevicesResult = main.FALSE
2396 utilities.assert_equals( expect=main.TRUE,
2397 actual=currentDevicesResult,
2398 onpass="ONOS" + controllerStr +
2399 " Switches view is correct",
2400 onfail="ONOS" + controllerStr +
2401 " Switches view is incorrect" )
2402
2403 if links[ controller ] and "Error" not in links[ controller ]:
2404 currentLinksResult = main.Mininet1.compareLinks(
2405 mnSwitches, mnLinks,
2406 json.loads( links[ controller ] ) )
2407 else:
2408 currentLinksResult = main.FALSE
2409 utilities.assert_equals( expect=main.TRUE,
2410 actual=currentLinksResult,
2411 onpass="ONOS" + controllerStr +
2412 " links view is correct",
2413 onfail="ONOS" + controllerStr +
2414 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002415 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002416 currentHostsResult = main.Mininet1.compareHosts(
2417 mnHosts,
2418 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002419 elif hosts[ controller ] == []:
2420 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002421 else:
2422 currentHostsResult = main.FALSE
2423 utilities.assert_equals( expect=main.TRUE,
2424 actual=currentHostsResult,
2425 onpass="ONOS" + controllerStr +
2426 " hosts exist in Mininet",
2427 onfail="ONOS" + controllerStr +
2428 " hosts don't match Mininet" )
2429 # CHECKING HOST ATTACHMENT POINTS
2430 hostAttachment = True
2431 zeroHosts = False
2432 # FIXME: topo-HA/obelisk specific mappings:
2433 # key is mac and value is dpid
2434 mappings = {}
2435 for i in range( 1, 29 ): # hosts 1 through 28
2436 # set up correct variables:
2437 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2438 if i == 1:
2439 deviceId = "1000".zfill(16)
2440 elif i == 2:
2441 deviceId = "2000".zfill(16)
2442 elif i == 3:
2443 deviceId = "3000".zfill(16)
2444 elif i == 4:
2445 deviceId = "3004".zfill(16)
2446 elif i == 5:
2447 deviceId = "5000".zfill(16)
2448 elif i == 6:
2449 deviceId = "6000".zfill(16)
2450 elif i == 7:
2451 deviceId = "6007".zfill(16)
2452 elif i >= 8 and i <= 17:
2453 dpid = '3' + str( i ).zfill( 3 )
2454 deviceId = dpid.zfill(16)
2455 elif i >= 18 and i <= 27:
2456 dpid = '6' + str( i ).zfill( 3 )
2457 deviceId = dpid.zfill(16)
2458 elif i == 28:
2459 deviceId = "2800".zfill(16)
2460 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002461 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002462 if hosts[ controller ] == []:
2463 main.log.warn( "There are no hosts discovered" )
2464 zeroHosts = True
2465 else:
2466 for host in hosts[ controller ]:
2467 mac = None
2468 location = None
2469 device = None
2470 port = None
2471 try:
2472 mac = host.get( 'mac' )
2473 assert mac, "mac field could not be found for this host object"
2474
2475 location = host.get( 'location' )
2476 assert location, "location field could not be found for this host object"
2477
2478 # Trim the protocol identifier off deviceId
2479 device = str( location.get( 'elementId' ) ).split(':')[1]
2480 assert device, "elementId field could not be found for this host location object"
2481
2482 port = location.get( 'port' )
2483 assert port, "port field could not be found for this host location object"
2484
2485 # Now check if this matches where they should be
2486 if mac and device and port:
2487 if str( port ) != "1":
2488 main.log.error( "The attachment port is incorrect for " +
2489 "host " + str( mac ) +
2490 ". Expected: 1 Actual: " + str( port) )
2491 hostAttachment = False
2492 if device != mappings[ str( mac ) ]:
2493 main.log.error( "The attachment device is incorrect for " +
2494 "host " + str( mac ) +
2495 ". Expected: " + mappings[ str( mac ) ] +
2496 " Actual: " + device )
2497 hostAttachment = False
2498 else:
2499 hostAttachment = False
2500 except AssertionError:
2501 main.log.exception( "Json object not as expected" )
2502 main.log.error( repr( host ) )
2503 hostAttachment = False
2504 else:
2505 main.log.error( "No hosts json output or \"Error\"" +
2506 " in output. hosts = " +
2507 repr( hosts[ controller ] ) )
2508 if zeroHosts is False:
2509 hostAttachment = True
2510
2511 # END CHECKING HOST ATTACHMENT POINTS
2512 devicesResults = devicesResults and currentDevicesResult
2513 linksResults = linksResults and currentLinksResult
2514 hostsResults = hostsResults and currentHostsResult
2515 hostAttachmentResults = hostAttachmentResults and\
2516 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002517 topoResult = ( devicesResults and linksResults
2518 and hostsResults and ipResult and
2519 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002520 utilities.assert_equals( expect=True,
2521 actual=topoResult,
2522 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002523 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002524 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002525
2526 # Compare json objects for hosts and dataplane clusters
2527
2528 # hosts
2529 main.step( "Hosts view is consistent across all ONOS nodes" )
2530 consistentHostsResult = main.TRUE
2531 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002532 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002533 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002534 if hosts[ controller ] == hosts[ 0 ]:
2535 continue
2536 else: # hosts not consistent
2537 main.log.error( "hosts from ONOS" + controllerStr +
2538 " is inconsistent with ONOS1" )
2539 main.log.warn( repr( hosts[ controller ] ) )
2540 consistentHostsResult = main.FALSE
2541
2542 else:
2543 main.log.error( "Error in getting ONOS hosts from ONOS" +
2544 controllerStr )
2545 consistentHostsResult = main.FALSE
2546 main.log.warn( "ONOS" + controllerStr +
2547 " hosts response: " +
2548 repr( hosts[ controller ] ) )
2549 utilities.assert_equals(
2550 expect=main.TRUE,
2551 actual=consistentHostsResult,
2552 onpass="Hosts view is consistent across all ONOS nodes",
2553 onfail="ONOS nodes have different views of hosts" )
2554
2555 main.step( "Hosts information is correct" )
2556 hostsResults = hostsResults and ipResult
2557 utilities.assert_equals(
2558 expect=main.TRUE,
2559 actual=hostsResults,
2560 onpass="Host information is correct",
2561 onfail="Host information is incorrect" )
2562
2563 main.step( "Host attachment points to the network" )
2564 utilities.assert_equals(
2565 expect=True,
2566 actual=hostAttachmentResults,
2567 onpass="Hosts are correctly attached to the network",
2568 onfail="ONOS did not correctly attach hosts to the network" )
2569
2570 # Strongly connected clusters of devices
2571 main.step( "Clusters view is consistent across all ONOS nodes" )
2572 consistentClustersResult = main.TRUE
2573 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002574 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002575 if "Error" not in clusters[ controller ]:
2576 if clusters[ controller ] == clusters[ 0 ]:
2577 continue
2578 else: # clusters not consistent
2579 main.log.error( "clusters from ONOS" +
2580 controllerStr +
2581 " is inconsistent with ONOS1" )
2582 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002583 else:
2584 main.log.error( "Error in getting dataplane clusters " +
2585 "from ONOS" + controllerStr )
2586 consistentClustersResult = main.FALSE
2587 main.log.warn( "ONOS" + controllerStr +
2588 " clusters response: " +
2589 repr( clusters[ controller ] ) )
2590 utilities.assert_equals(
2591 expect=main.TRUE,
2592 actual=consistentClustersResult,
2593 onpass="Clusters view is consistent across all ONOS nodes",
2594 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002595 if not consistentClustersResult:
2596 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07002597
2598 main.step( "There is only one SCC" )
2599 # there should always only be one cluster
2600 try:
2601 numClusters = len( json.loads( clusters[ 0 ] ) )
2602 except ( ValueError, TypeError ):
2603 main.log.exception( "Error parsing clusters[0]: " +
2604 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002605 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002606 clusterResults = main.FALSE
2607 if numClusters == 1:
2608 clusterResults = main.TRUE
2609 utilities.assert_equals(
2610 expect=1,
2611 actual=numClusters,
2612 onpass="ONOS shows 1 SCC",
2613 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2614
2615 topoResult = ( devicesResults and linksResults
2616 and hostsResults and consistentHostsResult
2617 and consistentClustersResult and clusterResults
2618 and ipResult and hostAttachmentResults )
2619
2620 topoResult = topoResult and int( count <= 2 )
2621 note = "note it takes about " + str( int( cliTime ) ) + \
2622 " seconds for the test to make all the cli calls to fetch " +\
2623 "the topology from each ONOS instance"
2624 main.log.info(
2625 "Very crass estimate for topology discovery/convergence( " +
2626 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2627 str( count ) + " tries" )
2628
2629 main.step( "Device information is correct" )
2630 utilities.assert_equals(
2631 expect=main.TRUE,
2632 actual=devicesResults,
2633 onpass="Device information is correct",
2634 onfail="Device information is incorrect" )
2635
2636 main.step( "Links are correct" )
2637 utilities.assert_equals(
2638 expect=main.TRUE,
2639 actual=linksResults,
2640 onpass="Link are correct",
2641 onfail="Links are incorrect" )
2642
Jon Halla440e872016-03-31 15:15:50 -07002643 main.step( "Hosts are correct" )
2644 utilities.assert_equals(
2645 expect=main.TRUE,
2646 actual=hostsResults,
2647 onpass="Hosts are correct",
2648 onfail="Hosts are incorrect" )
2649
Jon Hall5cf14d52015-07-16 12:15:19 -07002650 # FIXME: move this to an ONOS state case
2651 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002652 nodeResults = utilities.retry( main.HA.nodesCheck,
2653 False,
2654 args=[main.activeNodes],
2655 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002656
Jon Hall41d39f12016-04-11 22:54:35 -07002657 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002658 onpass="Nodes check successful",
2659 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002660 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002661 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002662 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002663 main.CLIs[i].name,
2664 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002665
Jon Halld2871c22016-07-26 11:01:14 -07002666 if not topoResult:
2667 main.cleanup()
2668 main.exit()
2669
Jon Hall5cf14d52015-07-16 12:15:19 -07002670 def CASE9( self, main ):
2671 """
2672 Link s3-s28 down
2673 """
2674 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002675 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002676 assert main, "main not defined"
2677 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002678 assert main.CLIs, "main.CLIs not defined"
2679 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002680 # NOTE: You should probably run a topology check after this
2681
2682 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2683
2684 description = "Turn off a link to ensure that Link Discovery " +\
2685 "is working properly"
2686 main.case( description )
2687
2688 main.step( "Kill Link between s3 and s28" )
2689 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2690 main.log.info( "Waiting " + str( linkSleep ) +
2691 " seconds for link down to be discovered" )
2692 time.sleep( linkSleep )
2693 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2694 onpass="Link down successful",
2695 onfail="Failed to bring link down" )
2696 # TODO do some sort of check here
2697
2698 def CASE10( self, main ):
2699 """
2700 Link s3-s28 up
2701 """
2702 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002703 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002704 assert main, "main not defined"
2705 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002706 assert main.CLIs, "main.CLIs not defined"
2707 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002708 # NOTE: You should probably run a topology check after this
2709
2710 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2711
2712 description = "Restore a link to ensure that Link Discovery is " + \
2713 "working properly"
2714 main.case( description )
2715
2716 main.step( "Bring link between s3 and s28 back up" )
2717 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2718 main.log.info( "Waiting " + str( linkSleep ) +
2719 " seconds for link up to be discovered" )
2720 time.sleep( linkSleep )
2721 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2722 onpass="Link up successful",
2723 onfail="Failed to bring link up" )
2724 # TODO do some sort of check here
2725
2726 def CASE11( self, main ):
2727 """
2728 Switch Down
2729 """
2730 # NOTE: You should probably run a topology check after this
2731 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002732 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002733 assert main, "main not defined"
2734 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002735 assert main.CLIs, "main.CLIs not defined"
2736 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002737
2738 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2739
2740 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002741 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002742 main.case( description )
2743 switch = main.params[ 'kill' ][ 'switch' ]
2744 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2745
2746 # TODO: Make this switch parameterizable
2747 main.step( "Kill " + switch )
2748 main.log.info( "Deleting " + switch )
2749 main.Mininet1.delSwitch( switch )
2750 main.log.info( "Waiting " + str( switchSleep ) +
2751 " seconds for switch down to be discovered" )
2752 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002753 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002754 # Peek at the deleted switch
2755 main.log.warn( str( device ) )
2756 result = main.FALSE
2757 if device and device[ 'available' ] is False:
2758 result = main.TRUE
2759 utilities.assert_equals( expect=main.TRUE, actual=result,
2760 onpass="Kill switch successful",
2761 onfail="Failed to kill switch?" )
2762
2763 def CASE12( self, main ):
2764 """
2765 Switch Up
2766 """
2767 # NOTE: You should probably run a topology check after this
2768 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002769 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002770 assert main, "main not defined"
2771 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002772 assert main.CLIs, "main.CLIs not defined"
2773 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002774 assert ONOS1Port, "ONOS1Port not defined"
2775 assert ONOS2Port, "ONOS2Port not defined"
2776 assert ONOS3Port, "ONOS3Port not defined"
2777 assert ONOS4Port, "ONOS4Port not defined"
2778 assert ONOS5Port, "ONOS5Port not defined"
2779 assert ONOS6Port, "ONOS6Port not defined"
2780 assert ONOS7Port, "ONOS7Port not defined"
2781
2782 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2783 switch = main.params[ 'kill' ][ 'switch' ]
2784 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2785 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002786 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002787 description = "Adding a switch to ensure it is discovered correctly"
2788 main.case( description )
2789
2790 main.step( "Add back " + switch )
2791 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2792 for peer in links:
2793 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002794 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002795 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2796 main.log.info( "Waiting " + str( switchSleep ) +
2797 " seconds for switch up to be discovered" )
2798 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002799 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002800 # Peek at the deleted switch
2801 main.log.warn( str( device ) )
2802 result = main.FALSE
2803 if device and device[ 'available' ]:
2804 result = main.TRUE
2805 utilities.assert_equals( expect=main.TRUE, actual=result,
2806 onpass="add switch successful",
2807 onfail="Failed to add switch?" )
2808
2809 def CASE13( self, main ):
2810 """
2811 Clean up
2812 """
2813 import os
2814 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002815 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002816 assert main, "main not defined"
2817 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002818 assert main.CLIs, "main.CLIs not defined"
2819 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002820
2821 # printing colors to terminal
2822 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2823 'blue': '\033[94m', 'green': '\033[92m',
2824 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2825 main.case( "Test Cleanup" )
2826 main.step( "Killing tcpdumps" )
2827 main.Mininet2.stopTcpdump()
2828
2829 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002830 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002831 main.step( "Copying MN pcap and ONOS log files to test station" )
2832 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2833 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002834 # NOTE: MN Pcap file is being saved to logdir.
2835 # We scp this file as MN and TestON aren't necessarily the same vm
2836
2837 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002838 # TODO: Load these from params
2839 # NOTE: must end in /
2840 logFolder = "/opt/onos/log/"
2841 logFiles = [ "karaf.log", "karaf.log.1" ]
2842 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002843 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002844 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002845 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002846 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2847 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002848 # std*.log's
2849 # NOTE: must end in /
2850 logFolder = "/opt/onos/var/"
2851 logFiles = [ "stderr.log", "stdout.log" ]
2852 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002853 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002854 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002855 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002856 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2857 logFolder + f, dstName )
2858 else:
2859 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002860
2861 main.step( "Stopping Mininet" )
2862 mnResult = main.Mininet1.stopNet()
2863 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2864 onpass="Mininet stopped",
2865 onfail="MN cleanup NOT successful" )
2866
2867 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002868 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002869 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2870 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002871
2872 try:
2873 timerLog = open( main.logdir + "/Timers.csv", 'w')
2874 # Overwrite with empty line and close
2875 labels = "Gossip Intents, Restart"
2876 data = str( gossipTime ) + ", " + str( main.restartTime )
2877 timerLog.write( labels + "\n" + data )
2878 timerLog.close()
2879 except NameError, e:
2880 main.log.exception(e)
2881
2882 def CASE14( self, main ):
2883 """
2884 start election app on all onos nodes
2885 """
Jon Halle1a3b752015-07-22 13:02:46 -07002886 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002887 assert main, "main not defined"
2888 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002889 assert main.CLIs, "main.CLIs not defined"
2890 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002891
2892 main.case("Start Leadership Election app")
2893 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002894 onosCli = main.CLIs[ main.activeNodes[0] ]
2895 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002896 utilities.assert_equals(
2897 expect=main.TRUE,
2898 actual=appResult,
2899 onpass="Election app installed",
2900 onfail="Something went wrong with installing Leadership election" )
2901
2902 main.step( "Run for election on each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002903 for i in main.activeNodes:
2904 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002905 time.sleep(5)
2906 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2907 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002908 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002909 expect=True,
2910 actual=sameResult,
2911 onpass="All nodes see the same leaderboards",
2912 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002913
Jon Hall25463a82016-04-13 14:03:52 -07002914 if sameResult:
2915 leader = leaders[ 0 ][ 0 ]
2916 if main.nodes[main.activeNodes[0]].ip_address in leader:
2917 correctLeader = True
2918 else:
2919 correctLeader = False
2920 main.step( "First node was elected leader" )
2921 utilities.assert_equals(
2922 expect=True,
2923 actual=correctLeader,
2924 onpass="Correct leader was elected",
2925 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002926
2927 def CASE15( self, main ):
2928 """
2929 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002930 15.1 Run election on each node
2931 15.2 Check that each node has the same leaders and candidates
2932 15.3 Find current leader and withdraw
2933 15.4 Check that a new node was elected leader
2934 15.5 Check that that new leader was the candidate of old leader
2935 15.6 Run for election on old leader
2936 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2937 15.8 Make sure that the old leader was added to the candidate list
2938
2939 old and new variable prefixes refer to data from before vs after
2940 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002941 """
2942 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002943 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002944 assert main, "main not defined"
2945 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002946 assert main.CLIs, "main.CLIs not defined"
2947 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002948
Jon Hall5cf14d52015-07-16 12:15:19 -07002949 description = "Check that Leadership Election is still functional"
2950 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002951 # NOTE: Need to re-run after restarts since being a canidate is not persistant
Jon Hall5cf14d52015-07-16 12:15:19 -07002952
Jon Halla440e872016-03-31 15:15:50 -07002953 oldLeaders = [] # list of lists of each nodes' candidates before
2954 newLeaders = [] # list of lists of each nodes' candidates after
acsmars71adceb2015-08-31 15:09:26 -07002955 oldLeader = '' # the old leader from oldLeaders, None if not same
2956 newLeader = '' # the new leaders fron newLoeaders, None if not same
2957 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2958 expectNoLeader = False # True when there is only one leader
2959 if main.numCtrls == 1:
2960 expectNoLeader = True
2961
2962 main.step( "Run for election on each node" )
2963 electionResult = main.TRUE
2964
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002965 for i in main.activeNodes: # run test election on each node
2966 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002967 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002968 utilities.assert_equals(
2969 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002970 actual=electionResult,
2971 onpass="All nodes successfully ran for leadership",
2972 onfail="At least one node failed to run for leadership" )
2973
acsmars3a72bde2015-09-02 14:16:22 -07002974 if electionResult == main.FALSE:
2975 main.log.error(
2976 "Skipping Test Case because Election Test App isn't loaded" )
2977 main.skipCase()
2978
acsmars71adceb2015-08-31 15:09:26 -07002979 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002980 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07002981 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07002982 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07002983 if sameResult:
2984 oldLeader = oldLeaders[ 0 ][ 0 ]
2985 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002986 else:
Jon Halla440e872016-03-31 15:15:50 -07002987 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002988 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002989 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002990 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002991 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002992 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002993
2994 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002995 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002996 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002997 if oldLeader is None:
2998 main.log.error( "Leadership isn't consistent." )
2999 withdrawResult = main.FALSE
3000 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003001 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07003002 if oldLeader == main.nodes[ i ].ip_address:
3003 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003004 break
3005 else: # FOR/ELSE statement
3006 main.log.error( "Leader election, could not find current leader" )
3007 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07003008 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07003009 utilities.assert_equals(
3010 expect=main.TRUE,
3011 actual=withdrawResult,
3012 onpass="Node was withdrawn from election",
3013 onfail="Node was not withdrawn from election" )
3014
acsmars71adceb2015-08-31 15:09:26 -07003015 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07003016 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07003017 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07003018 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07003019 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07003020 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07003021 if newLeaders[ 0 ][ 0 ] == 'none':
3022 main.log.error( "No leader was elected on at least 1 node" )
3023 if not expectNoLeader:
3024 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07003025 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07003026
3027 # Check that the new leader is not the older leader, which was withdrawn
3028 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003029 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003030 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07003031 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003032 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003033 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003034 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003035 onpass="Leadership election passed",
3036 onfail="Something went wrong with Leadership election" )
3037
Jon Halla440e872016-03-31 15:15:50 -07003038 main.step( "Check that that new leader was the candidate of old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003039 # candidates[ 2 ] should become the top candidate after withdrawl
acsmars71adceb2015-08-31 15:09:26 -07003040 correctCandidateResult = main.TRUE
3041 if expectNoLeader:
3042 if newLeader == 'none':
3043 main.log.info( "No leader expected. None found. Pass" )
3044 correctCandidateResult = main.TRUE
3045 else:
3046 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3047 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003048 elif len( oldLeaders[0] ) >= 3:
3049 if newLeader == oldLeaders[ 0 ][ 2 ]:
3050 # correct leader was elected
3051 correctCandidateResult = main.TRUE
3052 else:
3053 correctCandidateResult = main.FALSE
3054 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3055 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003056 else:
3057 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003058 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003059 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003060 utilities.assert_equals(
3061 expect=main.TRUE,
3062 actual=correctCandidateResult,
3063 onpass="Correct Candidate Elected",
3064 onfail="Incorrect Candidate Elected" )
3065
Jon Hall5cf14d52015-07-16 12:15:19 -07003066 main.step( "Run for election on old leader( just so everyone " +
3067 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003068 if oldLeaderCLI is not None:
3069 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003070 else:
acsmars71adceb2015-08-31 15:09:26 -07003071 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003072 runResult = main.FALSE
3073 utilities.assert_equals(
3074 expect=main.TRUE,
3075 actual=runResult,
3076 onpass="App re-ran for election",
3077 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003078
acsmars71adceb2015-08-31 15:09:26 -07003079 main.step(
3080 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003081 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003082 # Get new leaders and candidates
3083 reRunLeaders = []
3084 time.sleep( 5 ) # Paremterize
Jon Hall41d39f12016-04-11 22:54:35 -07003085 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003086
3087 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07003088 if not reRunLeaders[0]:
3089 positionResult = main.FALSE
3090 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07003091 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
3092 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003093 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003094 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003095 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003096 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003097 onpass="Old leader successfully re-ran for election",
3098 onfail="Something went wrong with Leadership election after " +
3099 "the old leader re-ran for election" )
3100
3101 def CASE16( self, main ):
3102 """
3103 Install Distributed Primitives app
3104 """
3105 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003106 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003107 assert main, "main not defined"
3108 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003109 assert main.CLIs, "main.CLIs not defined"
3110 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003111
3112 # Variables for the distributed primitives tests
3113 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003114 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003115 global onosSet
3116 global onosSetName
3117 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003118 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003119 onosSet = set([])
3120 onosSetName = "TestON-set"
3121
3122 description = "Install Primitives app"
3123 main.case( description )
3124 main.step( "Install Primitives app" )
3125 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003126 node = main.activeNodes[0]
3127 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003128 utilities.assert_equals( expect=main.TRUE,
3129 actual=appResults,
3130 onpass="Primitives app activated",
3131 onfail="Primitives app not activated" )
3132 time.sleep( 5 ) # To allow all nodes to activate
3133
3134 def CASE17( self, main ):
3135 """
3136 Check for basic functionality with distributed primitives
3137 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003138 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003139 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003140 assert main, "main not defined"
3141 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003142 assert main.CLIs, "main.CLIs not defined"
3143 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003144 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003145 assert onosSetName, "onosSetName not defined"
3146 # NOTE: assert fails if value is 0/None/Empty/False
3147 try:
3148 pCounterValue
3149 except NameError:
3150 main.log.error( "pCounterValue not defined, setting to 0" )
3151 pCounterValue = 0
3152 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003153 onosSet
3154 except NameError:
3155 main.log.error( "onosSet not defined, setting to empty Set" )
3156 onosSet = set([])
3157 # Variables for the distributed primitives tests. These are local only
3158 addValue = "a"
3159 addAllValue = "a b c d e f"
3160 retainValue = "c d e f"
3161
3162 description = "Check for basic functionality with distributed " +\
3163 "primitives"
3164 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003165 main.caseExplanation = "Test the methods of the distributed " +\
3166 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003167 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003168 # Partitioned counters
3169 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003170 pCounters = []
3171 threads = []
3172 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003173 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003174 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3175 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003176 args=[ pCounterName ] )
3177 pCounterValue += 1
3178 addedPValues.append( pCounterValue )
3179 threads.append( t )
3180 t.start()
3181
3182 for t in threads:
3183 t.join()
3184 pCounters.append( t.result )
3185 # Check that counter incremented numController times
3186 pCounterResults = True
3187 for i in addedPValues:
3188 tmpResult = i in pCounters
3189 pCounterResults = pCounterResults and tmpResult
3190 if not tmpResult:
3191 main.log.error( str( i ) + " is not in partitioned "
3192 "counter incremented results" )
3193 utilities.assert_equals( expect=True,
3194 actual=pCounterResults,
3195 onpass="Default counter incremented",
3196 onfail="Error incrementing default" +
3197 " counter" )
3198
Jon Halle1a3b752015-07-22 13:02:46 -07003199 main.step( "Get then Increment a default counter on each node" )
3200 pCounters = []
3201 threads = []
3202 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003203 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003204 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3205 name="counterGetAndAdd-" + str( i ),
3206 args=[ pCounterName ] )
3207 addedPValues.append( pCounterValue )
3208 pCounterValue += 1
3209 threads.append( t )
3210 t.start()
3211
3212 for t in threads:
3213 t.join()
3214 pCounters.append( t.result )
3215 # Check that counter incremented numController times
3216 pCounterResults = True
3217 for i in addedPValues:
3218 tmpResult = i in pCounters
3219 pCounterResults = pCounterResults and tmpResult
3220 if not tmpResult:
3221 main.log.error( str( i ) + " is not in partitioned "
3222 "counter incremented results" )
3223 utilities.assert_equals( expect=True,
3224 actual=pCounterResults,
3225 onpass="Default counter incremented",
3226 onfail="Error incrementing default" +
3227 " counter" )
3228
3229 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003230 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003231 utilities.assert_equals( expect=main.TRUE,
3232 actual=incrementCheck,
3233 onpass="Added counters are correct",
3234 onfail="Added counters are incorrect" )
3235
3236 main.step( "Add -8 to then get a default counter on each node" )
3237 pCounters = []
3238 threads = []
3239 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003240 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003241 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3242 name="counterIncrement-" + str( i ),
3243 args=[ pCounterName ],
3244 kwargs={ "delta": -8 } )
3245 pCounterValue += -8
3246 addedPValues.append( pCounterValue )
3247 threads.append( t )
3248 t.start()
3249
3250 for t in threads:
3251 t.join()
3252 pCounters.append( t.result )
3253 # Check that counter incremented numController times
3254 pCounterResults = True
3255 for i in addedPValues:
3256 tmpResult = i in pCounters
3257 pCounterResults = pCounterResults and tmpResult
3258 if not tmpResult:
3259 main.log.error( str( i ) + " is not in partitioned "
3260 "counter incremented results" )
3261 utilities.assert_equals( expect=True,
3262 actual=pCounterResults,
3263 onpass="Default counter incremented",
3264 onfail="Error incrementing default" +
3265 " counter" )
3266
3267 main.step( "Add 5 to then get a default counter on each node" )
3268 pCounters = []
3269 threads = []
3270 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003271 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003272 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3273 name="counterIncrement-" + str( i ),
3274 args=[ pCounterName ],
3275 kwargs={ "delta": 5 } )
3276 pCounterValue += 5
3277 addedPValues.append( pCounterValue )
3278 threads.append( t )
3279 t.start()
3280
3281 for t in threads:
3282 t.join()
3283 pCounters.append( t.result )
3284 # Check that counter incremented numController times
3285 pCounterResults = True
3286 for i in addedPValues:
3287 tmpResult = i in pCounters
3288 pCounterResults = pCounterResults and tmpResult
3289 if not tmpResult:
3290 main.log.error( str( i ) + " is not in partitioned "
3291 "counter incremented results" )
3292 utilities.assert_equals( expect=True,
3293 actual=pCounterResults,
3294 onpass="Default counter incremented",
3295 onfail="Error incrementing default" +
3296 " counter" )
3297
3298 main.step( "Get then add 5 to a default counter on each node" )
3299 pCounters = []
3300 threads = []
3301 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003302 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003303 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3304 name="counterIncrement-" + str( i ),
3305 args=[ pCounterName ],
3306 kwargs={ "delta": 5 } )
3307 addedPValues.append( pCounterValue )
3308 pCounterValue += 5
3309 threads.append( t )
3310 t.start()
3311
3312 for t in threads:
3313 t.join()
3314 pCounters.append( t.result )
3315 # Check that counter incremented numController times
3316 pCounterResults = True
3317 for i in addedPValues:
3318 tmpResult = i in pCounters
3319 pCounterResults = pCounterResults and tmpResult
3320 if not tmpResult:
3321 main.log.error( str( i ) + " is not in partitioned "
3322 "counter incremented results" )
3323 utilities.assert_equals( expect=True,
3324 actual=pCounterResults,
3325 onpass="Default counter incremented",
3326 onfail="Error incrementing default" +
3327 " counter" )
3328
3329 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003330 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003331 utilities.assert_equals( expect=main.TRUE,
3332 actual=incrementCheck,
3333 onpass="Added counters are correct",
3334 onfail="Added counters are incorrect" )
3335
Jon Hall5cf14d52015-07-16 12:15:19 -07003336 # DISTRIBUTED SETS
3337 main.step( "Distributed Set get" )
3338 size = len( onosSet )
3339 getResponses = []
3340 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003341 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003342 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003343 name="setTestGet-" + str( i ),
3344 args=[ onosSetName ] )
3345 threads.append( t )
3346 t.start()
3347 for t in threads:
3348 t.join()
3349 getResponses.append( t.result )
3350
3351 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003352 for i in range( len( main.activeNodes ) ):
3353 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003354 if isinstance( getResponses[ i ], list):
3355 current = set( getResponses[ i ] )
3356 if len( current ) == len( getResponses[ i ] ):
3357 # no repeats
3358 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003359 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003360 " has incorrect view" +
3361 " of set " + onosSetName + ":\n" +
3362 str( getResponses[ i ] ) )
3363 main.log.debug( "Expected: " + str( onosSet ) )
3364 main.log.debug( "Actual: " + str( current ) )
3365 getResults = main.FALSE
3366 else:
3367 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003368 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003369 " has repeat elements in" +
3370 " set " + onosSetName + ":\n" +
3371 str( getResponses[ i ] ) )
3372 getResults = main.FALSE
3373 elif getResponses[ i ] == main.ERROR:
3374 getResults = main.FALSE
3375 utilities.assert_equals( expect=main.TRUE,
3376 actual=getResults,
3377 onpass="Set elements are correct",
3378 onfail="Set elements are incorrect" )
3379
3380 main.step( "Distributed Set size" )
3381 sizeResponses = []
3382 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003383 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003384 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003385 name="setTestSize-" + str( i ),
3386 args=[ onosSetName ] )
3387 threads.append( t )
3388 t.start()
3389 for t in threads:
3390 t.join()
3391 sizeResponses.append( t.result )
3392
3393 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003394 for i in range( len( main.activeNodes ) ):
3395 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003396 if size != sizeResponses[ i ]:
3397 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003398 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003399 " expected a size of " + str( size ) +
3400 " for set " + onosSetName +
3401 " but got " + str( sizeResponses[ i ] ) )
3402 utilities.assert_equals( expect=main.TRUE,
3403 actual=sizeResults,
3404 onpass="Set sizes are correct",
3405 onfail="Set sizes are incorrect" )
3406
3407 main.step( "Distributed Set add()" )
3408 onosSet.add( addValue )
3409 addResponses = []
3410 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003411 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003412 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003413 name="setTestAdd-" + str( i ),
3414 args=[ onosSetName, addValue ] )
3415 threads.append( t )
3416 t.start()
3417 for t in threads:
3418 t.join()
3419 addResponses.append( t.result )
3420
3421 # main.TRUE = successfully changed the set
3422 # main.FALSE = action resulted in no change in set
3423 # main.ERROR - Some error in executing the function
3424 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003425 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003426 if addResponses[ i ] == main.TRUE:
3427 # All is well
3428 pass
3429 elif addResponses[ i ] == main.FALSE:
3430 # Already in set, probably fine
3431 pass
3432 elif addResponses[ i ] == main.ERROR:
3433 # Error in execution
3434 addResults = main.FALSE
3435 else:
3436 # unexpected result
3437 addResults = main.FALSE
3438 if addResults != main.TRUE:
3439 main.log.error( "Error executing set add" )
3440
3441 # Check if set is still correct
3442 size = len( onosSet )
3443 getResponses = []
3444 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003445 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003446 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003447 name="setTestGet-" + str( i ),
3448 args=[ onosSetName ] )
3449 threads.append( t )
3450 t.start()
3451 for t in threads:
3452 t.join()
3453 getResponses.append( t.result )
3454 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003455 for i in range( len( main.activeNodes ) ):
3456 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003457 if isinstance( getResponses[ i ], list):
3458 current = set( getResponses[ i ] )
3459 if len( current ) == len( getResponses[ i ] ):
3460 # no repeats
3461 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003462 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003463 " of set " + onosSetName + ":\n" +
3464 str( getResponses[ i ] ) )
3465 main.log.debug( "Expected: " + str( onosSet ) )
3466 main.log.debug( "Actual: " + str( current ) )
3467 getResults = main.FALSE
3468 else:
3469 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003470 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003471 " set " + onosSetName + ":\n" +
3472 str( getResponses[ i ] ) )
3473 getResults = main.FALSE
3474 elif getResponses[ i ] == main.ERROR:
3475 getResults = main.FALSE
3476 sizeResponses = []
3477 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003478 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003479 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003480 name="setTestSize-" + str( i ),
3481 args=[ onosSetName ] )
3482 threads.append( t )
3483 t.start()
3484 for t in threads:
3485 t.join()
3486 sizeResponses.append( t.result )
3487 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003488 for i in range( len( main.activeNodes ) ):
3489 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003490 if size != sizeResponses[ i ]:
3491 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003492 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003493 " expected a size of " + str( size ) +
3494 " for set " + onosSetName +
3495 " but got " + str( sizeResponses[ i ] ) )
3496 addResults = addResults and getResults and sizeResults
3497 utilities.assert_equals( expect=main.TRUE,
3498 actual=addResults,
3499 onpass="Set add correct",
3500 onfail="Set add was incorrect" )
3501
3502 main.step( "Distributed Set addAll()" )
3503 onosSet.update( addAllValue.split() )
3504 addResponses = []
3505 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003506 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003507 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003508 name="setTestAddAll-" + str( i ),
3509 args=[ onosSetName, addAllValue ] )
3510 threads.append( t )
3511 t.start()
3512 for t in threads:
3513 t.join()
3514 addResponses.append( t.result )
3515
3516 # main.TRUE = successfully changed the set
3517 # main.FALSE = action resulted in no change in set
3518 # main.ERROR - Some error in executing the function
3519 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003520 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003521 if addResponses[ i ] == main.TRUE:
3522 # All is well
3523 pass
3524 elif addResponses[ i ] == main.FALSE:
3525 # Already in set, probably fine
3526 pass
3527 elif addResponses[ i ] == main.ERROR:
3528 # Error in execution
3529 addAllResults = main.FALSE
3530 else:
3531 # unexpected result
3532 addAllResults = main.FALSE
3533 if addAllResults != main.TRUE:
3534 main.log.error( "Error executing set addAll" )
3535
3536 # Check if set is still correct
3537 size = len( onosSet )
3538 getResponses = []
3539 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003540 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003541 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003542 name="setTestGet-" + str( i ),
3543 args=[ onosSetName ] )
3544 threads.append( t )
3545 t.start()
3546 for t in threads:
3547 t.join()
3548 getResponses.append( t.result )
3549 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003550 for i in range( len( main.activeNodes ) ):
3551 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003552 if isinstance( getResponses[ i ], list):
3553 current = set( getResponses[ i ] )
3554 if len( current ) == len( getResponses[ i ] ):
3555 # no repeats
3556 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003557 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003558 " has incorrect view" +
3559 " of set " + onosSetName + ":\n" +
3560 str( getResponses[ i ] ) )
3561 main.log.debug( "Expected: " + str( onosSet ) )
3562 main.log.debug( "Actual: " + str( current ) )
3563 getResults = main.FALSE
3564 else:
3565 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003566 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003567 " has repeat elements in" +
3568 " set " + onosSetName + ":\n" +
3569 str( getResponses[ i ] ) )
3570 getResults = main.FALSE
3571 elif getResponses[ i ] == main.ERROR:
3572 getResults = main.FALSE
3573 sizeResponses = []
3574 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003575 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003576 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003577 name="setTestSize-" + str( i ),
3578 args=[ onosSetName ] )
3579 threads.append( t )
3580 t.start()
3581 for t in threads:
3582 t.join()
3583 sizeResponses.append( t.result )
3584 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003585 for i in range( len( main.activeNodes ) ):
3586 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003587 if size != sizeResponses[ i ]:
3588 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003589 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003590 " expected a size of " + str( size ) +
3591 " for set " + onosSetName +
3592 " but got " + str( sizeResponses[ i ] ) )
3593 addAllResults = addAllResults and getResults and sizeResults
3594 utilities.assert_equals( expect=main.TRUE,
3595 actual=addAllResults,
3596 onpass="Set addAll correct",
3597 onfail="Set addAll was incorrect" )
3598
3599 main.step( "Distributed Set contains()" )
3600 containsResponses = []
3601 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003602 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003603 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003604 name="setContains-" + str( i ),
3605 args=[ onosSetName ],
3606 kwargs={ "values": addValue } )
3607 threads.append( t )
3608 t.start()
3609 for t in threads:
3610 t.join()
3611 # NOTE: This is the tuple
3612 containsResponses.append( t.result )
3613
3614 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003615 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003616 if containsResponses[ i ] == main.ERROR:
3617 containsResults = main.FALSE
3618 else:
3619 containsResults = containsResults and\
3620 containsResponses[ i ][ 1 ]
3621 utilities.assert_equals( expect=main.TRUE,
3622 actual=containsResults,
3623 onpass="Set contains is functional",
3624 onfail="Set contains failed" )
3625
3626 main.step( "Distributed Set containsAll()" )
3627 containsAllResponses = []
3628 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003629 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003630 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003631 name="setContainsAll-" + str( i ),
3632 args=[ onosSetName ],
3633 kwargs={ "values": addAllValue } )
3634 threads.append( t )
3635 t.start()
3636 for t in threads:
3637 t.join()
3638 # NOTE: This is the tuple
3639 containsAllResponses.append( t.result )
3640
3641 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003642 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003643 if containsResponses[ i ] == main.ERROR:
3644 containsResults = main.FALSE
3645 else:
3646 containsResults = containsResults and\
3647 containsResponses[ i ][ 1 ]
3648 utilities.assert_equals( expect=main.TRUE,
3649 actual=containsAllResults,
3650 onpass="Set containsAll is functional",
3651 onfail="Set containsAll failed" )
3652
3653 main.step( "Distributed Set remove()" )
3654 onosSet.remove( addValue )
3655 removeResponses = []
3656 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003657 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003658 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003659 name="setTestRemove-" + str( i ),
3660 args=[ onosSetName, addValue ] )
3661 threads.append( t )
3662 t.start()
3663 for t in threads:
3664 t.join()
3665 removeResponses.append( t.result )
3666
3667 # main.TRUE = successfully changed the set
3668 # main.FALSE = action resulted in no change in set
3669 # main.ERROR - Some error in executing the function
3670 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003671 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003672 if removeResponses[ i ] == main.TRUE:
3673 # All is well
3674 pass
3675 elif removeResponses[ i ] == main.FALSE:
3676 # not in set, probably fine
3677 pass
3678 elif removeResponses[ i ] == main.ERROR:
3679 # Error in execution
3680 removeResults = main.FALSE
3681 else:
3682 # unexpected result
3683 removeResults = main.FALSE
3684 if removeResults != main.TRUE:
3685 main.log.error( "Error executing set remove" )
3686
3687 # Check if set is still correct
3688 size = len( onosSet )
3689 getResponses = []
3690 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003691 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003692 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003693 name="setTestGet-" + str( i ),
3694 args=[ onosSetName ] )
3695 threads.append( t )
3696 t.start()
3697 for t in threads:
3698 t.join()
3699 getResponses.append( t.result )
3700 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003701 for i in range( len( main.activeNodes ) ):
3702 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003703 if isinstance( getResponses[ i ], list):
3704 current = set( getResponses[ i ] )
3705 if len( current ) == len( getResponses[ i ] ):
3706 # no repeats
3707 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003708 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003709 " has incorrect view" +
3710 " of set " + onosSetName + ":\n" +
3711 str( getResponses[ i ] ) )
3712 main.log.debug( "Expected: " + str( onosSet ) )
3713 main.log.debug( "Actual: " + str( current ) )
3714 getResults = main.FALSE
3715 else:
3716 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003717 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003718 " has repeat elements in" +
3719 " set " + onosSetName + ":\n" +
3720 str( getResponses[ i ] ) )
3721 getResults = main.FALSE
3722 elif getResponses[ i ] == main.ERROR:
3723 getResults = main.FALSE
3724 sizeResponses = []
3725 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003726 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003727 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003728 name="setTestSize-" + str( i ),
3729 args=[ onosSetName ] )
3730 threads.append( t )
3731 t.start()
3732 for t in threads:
3733 t.join()
3734 sizeResponses.append( t.result )
3735 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003736 for i in range( len( main.activeNodes ) ):
3737 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003738 if size != sizeResponses[ i ]:
3739 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003740 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003741 " expected a size of " + str( size ) +
3742 " for set " + onosSetName +
3743 " but got " + str( sizeResponses[ i ] ) )
3744 removeResults = removeResults and getResults and sizeResults
3745 utilities.assert_equals( expect=main.TRUE,
3746 actual=removeResults,
3747 onpass="Set remove correct",
3748 onfail="Set remove was incorrect" )
3749
        main.step( "Distributed Set removeAll()" )
        # Remove every element of addAllValue from the local reference set,
        # then ask each active ONOS node to do the same removeAll on the
        # distributed set, and finally verify contents and size on all nodes.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            # Issue the removeAll from every active node in parallel
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            # NOTE(review): only logged; removeAllResponses may be shorter
            # than activeNodes if thread launch failed part-way through
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Fetch the distributed set's contents from every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node number used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        # Cross-check the reported size of the set on every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the op, the contents, and the size all check out
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3849
        main.step( "Distributed Set addAll()" )
        # Add every element of addAllValue to the local reference set, ask
        # each active node to addAll the same values to the distributed set,
        # then verify contents and size on all nodes.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        # Issue the addAll from every active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Fetch the distributed set's contents from every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node number used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        # Cross-check the reported size of the set on every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the op, the contents, and the size all check out
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3946
        main.step( "Distributed Set clear()" )
        # Empty the local reference set, ask each active node to clear the
        # distributed set, then verify it is empty on all nodes.
        onosSet.clear()
        clearResponses = []
        threads = []
        # setTestRemove with clear=True performs the clear operation
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Fetch the distributed set's contents from every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node number used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        # Cross-check the reported size of the set on every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the op, the contents, and the size all check out
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4044
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after the clear() step: addAll the same values
        # again on every active node, then verify contents and size.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        # Issue the addAll from every active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Fetch the distributed set's contents from every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node number used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        # Cross-check the reported size of the set on every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the op, the contents, and the size all check out
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4141
        main.step( "Distributed Set retain()" )
        # Intersect the local reference set with retainValue, ask each active
        # node to retain only those values in the distributed set, then verify
        # contents and size on all nodes.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        # setTestRemove with retain=True performs the retain operation
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Fetch the distributed set's contents from every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node number used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        # Cross-check the reported size of the set on every active node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the op, the contents, and the size all check out
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4238
Jon Hall2a5002c2015-08-21 16:49:11 -07004239 # Transactional maps
4240 main.step( "Partitioned Transactional maps put" )
4241 tMapValue = "Testing"
4242 numKeys = 100
4243 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004244 node = main.activeNodes[0]
4245 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall6e709752016-02-01 13:38:46 -08004246 if putResponses and len( putResponses ) == 100:
Jon Hall2a5002c2015-08-21 16:49:11 -07004247 for i in putResponses:
4248 if putResponses[ i ][ 'value' ] != tMapValue:
4249 putResult = False
4250 else:
4251 putResult = False
4252 if not putResult:
4253 main.log.debug( "Put response values: " + str( putResponses ) )
4254 utilities.assert_equals( expect=True,
4255 actual=putResult,
4256 onpass="Partitioned Transactional Map put successful",
4257 onfail="Partitioned Transactional Map put values are incorrect" )
4258
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        # For every key written in the put step, read it back from every
        # active node in parallel and check all nodes agree on the value.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): this loop variable shadows the 'node' index set in
            # the put step above; here it holds a per-node response value
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )