"""
Description: This test is to determine if ONOS can handle
             a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""

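# Example invocation ( hypothetical paths and names; adjust to your TestON setup ):
#     cd TestON/bin
#     ./cli.py run HAkillNodes
# The companion HAkillNodes.params and HAkillNodes.topo files are assumed to
# supply the values read below via main.params ( e.g. 'num_controllers',
# 'ENV', 'CTRL', 'Git', 'timers' ) and the component handles such as
# main.ONOSbench, main.ONOScli1..N, main.Mininet1 and main.Mininet2.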

class HAkillNodes:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )

        main.step( "Make sure ONOS service doesn't automatically respawn" )
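        # onos.conf normally tells init/upstart to respawn the ONOS process if
        # it dies; commenting out the 'respawn' stanza here ( it is restored in
        # the "Clean up ONOS service changes" step below ) is what keeps nodes
        # killed later in this test from being brought back automatically.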
        handle = main.ONOSbench.handle
        handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
        handle.expect( "\$" )  # $ from the command
        handle.expect( "\$" )  # $ from the prompt

        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAkillNodes"
        plotName = "Plot-HA"
        index = "2"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for node in main.nodes:
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
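        # NOTE: activeNodes holds indices into main.CLIs / main.nodes; the
        # failure-inducing case is expected to drop the indices of killed
        # nodes so later checks only query controllers that should be running.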

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Clean up ONOS service changes" )
        handle.sendline( "git checkout -- tools/package/init/onos.conf" )
        handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
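        # utilities.retry() is assumed to re-run HA.nodesCheck( activeNodes )
        # up to 'attempts' times, returning the last result, so transiently
        # unready nodes get a few chances to report a healthy state.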

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
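        # The params file is assumed to provide a nested mapping of
        # component -> setting -> value, e.g. ( hypothetical entry ):
        #   <ONOS_Configuration>
        #       <org.onosproject.store.flow.impl.DistributedFlowRuleStore>
        #           <backupCount>3</backupCount>
        #       </org.onosproject.store.flow.impl.DistributedFlowRuleStore>
        #   </ONOS_Configuration>
        # Each leaf value is pushed to ONOS with setCfg() below.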
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
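            # In summary ( derived from the branches below ): s1 and s28 go to
            # ONOS1, s2-s3 to ONOS2, s5-s6 to ONOS3, s4 to ONOS4, s8-s17 to
            # ONOS5, s7 to ONOS6 and s18-s27 to ONOS7, with the index wrapped
            # by "% main.numCtrls" so smaller clusters still get a valid node.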
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
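        # The obelisk topology is assumed to pair host hN with hN+10, and the
        # host MACs simply encode the host number in hex, e.g. for i=8 the
        # pair is h8 ( 00:00:00:00:00:08 ) and h18 ( 00:00:00:00:00:12 ).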
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
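                # The leaders output is expected to contain one topic per
                # intent work partition ( "work-partition-0" through
                # "work-partition-13" ), each with an elected leader.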
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
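        # Rough bound: with the anti-entropy ( gossip ) period taken from the
        # params file, intents should disperse to every node within roughly
        # one gossip period per active node; anything slower fails the check.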
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="Intent partitions are in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents changes" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        main.log.debug( onosCli.flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states
        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2 ...
            # 0x01     INSTALLED  INSTALLING
            # ...         ...        ...
            # ...         ...        ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1435 sort_keys=True,
1436 indent=4,
1437 separators=( ',', ': ' ) ) )
1438 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001439 main.log.debug( "ONOS" + node + " intents match ONOS" +
1440 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001441 elif intentsResults and consistentIntents:
1442 intentCheck = main.TRUE
1443 intentState = ONOSIntents[ 0 ]
1444
1445 main.step( "Get the flows from each controller" )
1446 global flowState
1447 flowState = []
1448 ONOSFlows = []
1449 ONOSFlowsJson = []
1450 flowCheck = main.FALSE
1451 consistentFlows = True
1452 flowsResults = True
1453 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001454 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001455 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001456 name="flows-" + str( i ),
1457 args=[],
1458 kwargs={ 'jsonFormat': True } )
1459 threads.append( t )
1460 t.start()
1461
1462 # NOTE: Flows command can take some time to run
1463         time.sleep( 30 )
1464 for t in threads:
1465 t.join()
1466 result = t.result
1467 ONOSFlows.append( result )
1468
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001469 for i in range( len( ONOSFlows ) ):
1470 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001471 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1472 main.log.error( "Error in getting ONOS" + num + " flows" )
1473 main.log.warn( "ONOS" + num + " flows response: " +
1474 repr( ONOSFlows[ i ] ) )
1475 flowsResults = False
1476 ONOSFlowsJson.append( None )
1477 else:
1478 try:
1479 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1480 except ( ValueError, TypeError ):
1481 # FIXME: change this to log.error?
1482 main.log.exception( "Error in parsing ONOS" + num +
1483 " response as json." )
1484 main.log.error( repr( ONOSFlows[ i ] ) )
1485 ONOSFlowsJson.append( None )
1486 flowsResults = False
1487 utilities.assert_equals(
1488 expect=True,
1489 actual=flowsResults,
1490 onpass="No error in reading flows output",
1491 onfail="Error in reading flows from ONOS" )
1492
1493 main.step( "Check for consistency in Flows from each controller" )
1494 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1495 if all( tmp ):
1496 main.log.info( "Flow count is consistent across all ONOS nodes" )
1497 else:
1498 consistentFlows = False
1499 utilities.assert_equals(
1500 expect=True,
1501 actual=consistentFlows,
1502 onpass="The flow count is consistent across all ONOS nodes",
1503 onfail="ONOS nodes have different flow counts" )
1504
1505 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001506 for i in range( len( ONOSFlows ) ):
1507 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001508 try:
1509 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001510 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001511 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1512 indent=4, separators=( ',', ': ' ) ) )
1513 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001514 main.log.warn( "ONOS" + node + " flows: " +
1515 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001516 elif flowsResults and consistentFlows:
1517 flowCheck = main.TRUE
1518 flowState = ONOSFlows[ 0 ]
1519
1520 main.step( "Get the OF Table entries" )
1521 global flows
1522 flows = []
1523 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001524 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001525 if flowCheck == main.FALSE:
1526 for table in flows:
1527 main.log.warn( table )
1528 # TODO: Compare switch flow tables with ONOS flow tables
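        # NOTE: these per-switch dumps are reused in CASE7, where
        # flowTableComp() diffs them against fresh post-failure dumps of the
        # same switches. Comparing them against the ONOS 'flows' output
        # gathered above would additionally need a mapping from the ONOS
        # flow JSON to the ovs-ofctl dump format, which is not done here.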
1529
1530 main.step( "Start continuous pings" )
1531 main.Mininet2.pingLong(
1532 src=main.params[ 'PING' ][ 'source1' ],
1533 target=main.params[ 'PING' ][ 'target1' ],
1534 pingTime=500 )
1535 main.Mininet2.pingLong(
1536 src=main.params[ 'PING' ][ 'source2' ],
1537 target=main.params[ 'PING' ][ 'target2' ],
1538 pingTime=500 )
1539 main.Mininet2.pingLong(
1540 src=main.params[ 'PING' ][ 'source3' ],
1541 target=main.params[ 'PING' ][ 'target3' ],
1542 pingTime=500 )
1543 main.Mininet2.pingLong(
1544 src=main.params[ 'PING' ][ 'source4' ],
1545 target=main.params[ 'PING' ][ 'target4' ],
1546 pingTime=500 )
1547 main.Mininet2.pingLong(
1548 src=main.params[ 'PING' ][ 'source5' ],
1549 target=main.params[ 'PING' ][ 'target5' ],
1550 pingTime=500 )
1551 main.Mininet2.pingLong(
1552 src=main.params[ 'PING' ][ 'source6' ],
1553 target=main.params[ 'PING' ][ 'target6' ],
1554 pingTime=500 )
1555 main.Mininet2.pingLong(
1556 src=main.params[ 'PING' ][ 'source7' ],
1557 target=main.params[ 'PING' ][ 'target7' ],
1558 pingTime=500 )
1559 main.Mininet2.pingLong(
1560 src=main.params[ 'PING' ][ 'source8' ],
1561 target=main.params[ 'PING' ][ 'target8' ],
1562 pingTime=500 )
1563 main.Mininet2.pingLong(
1564 src=main.params[ 'PING' ][ 'source9' ],
1565 target=main.params[ 'PING' ][ 'target9' ],
1566 pingTime=500 )
1567 main.Mininet2.pingLong(
1568 src=main.params[ 'PING' ][ 'source10' ],
1569 target=main.params[ 'PING' ][ 'target10' ],
1570 pingTime=500 )
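        # NOTE: these pings run in the background ( pingTime=500 ) and are
        # torn down in CASE7 with pingLongKill(); the currently commented-out
        # checkForLoss() block there inspects the /tmp/ping.h* output for
        # dataplane loss during the failure.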
1571
1572 main.step( "Collecting topology information from ONOS" )
1573 devices = []
1574 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001575 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001576 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001577 name="devices-" + str( i ),
1578 args=[ ] )
1579 threads.append( t )
1580 t.start()
1581
1582 for t in threads:
1583 t.join()
1584 devices.append( t.result )
1585 hosts = []
1586 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001587 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001588 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001589 name="hosts-" + str( i ),
1590 args=[ ] )
1591 threads.append( t )
1592 t.start()
1593
1594 for t in threads:
1595 t.join()
1596 try:
1597 hosts.append( json.loads( t.result ) )
1598 except ( ValueError, TypeError ):
1599 # FIXME: better handling of this, print which node
1600 # Maybe use thread name?
1601 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001602 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001603 hosts.append( None )
1604
1605 ports = []
1606 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001607 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001608 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001609 name="ports-" + str( i ),
1610 args=[ ] )
1611 threads.append( t )
1612 t.start()
1613
1614 for t in threads:
1615 t.join()
1616 ports.append( t.result )
1617 links = []
1618 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001619 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001620 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001621 name="links-" + str( i ),
1622 args=[ ] )
1623 threads.append( t )
1624 t.start()
1625
1626 for t in threads:
1627 t.join()
1628 links.append( t.result )
1629 clusters = []
1630 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001631 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001632 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001633 name="clusters-" + str( i ),
1634 args=[ ] )
1635 threads.append( t )
1636 t.start()
1637
1638 for t in threads:
1639 t.join()
1640 clusters.append( t.result )
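        # NOTE: devices, hosts, ports, links and clusters are all collected in
        # main.activeNodes order, so index i of each list is the view from
        # controller ONOS<main.activeNodes[i] + 1>; e.g. with
        # main.activeNodes == [ 0, 1, 2 ], devices[ 2 ] is ONOS3's view.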
1641 # Compare json objects for hosts and dataplane clusters
1642
1643 # hosts
1644 main.step( "Host view is consistent across ONOS nodes" )
1645 consistentHostsResult = main.TRUE
1646 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001647 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001648 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001649 if hosts[ controller ] == hosts[ 0 ]:
1650 continue
1651 else: # hosts not consistent
1652 main.log.error( "hosts from ONOS" +
1653 controllerStr +
1654                                    " are inconsistent with ONOS1" )
1655 main.log.warn( repr( hosts[ controller ] ) )
1656 consistentHostsResult = main.FALSE
1657
1658 else:
1659 main.log.error( "Error in getting ONOS hosts from ONOS" +
1660 controllerStr )
1661 consistentHostsResult = main.FALSE
1662 main.log.warn( "ONOS" + controllerStr +
1663 " hosts response: " +
1664 repr( hosts[ controller ] ) )
1665 utilities.assert_equals(
1666 expect=main.TRUE,
1667 actual=consistentHostsResult,
1668 onpass="Hosts view is consistent across all ONOS nodes",
1669 onfail="ONOS nodes have different views of hosts" )
1670
1671 main.step( "Each host has an IP address" )
1672 ipResult = main.TRUE
1673 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001674 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001675 if hosts[ controller ]:
1676 for host in hosts[ controller ]:
1677 if not host.get( 'ipAddresses', [ ] ):
1678 main.log.error( "Error with host ips on controller" +
1679 controllerStr + ": " + str( host ) )
1680 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001681 utilities.assert_equals(
1682 expect=main.TRUE,
1683 actual=ipResult,
1684             onpass="Every host has an IP address",
1685             onfail="At least one host is missing an IP address" )
1686
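        # For reference, a host entry as consumed by this test looks roughly
        # like the following (illustrative values; only the 'mac',
        # 'ipAddresses' and 'location' fields are read by these checks):
        #
        #     {
        #         "mac": "00:00:00:00:00:01",
        #         "ipAddresses": [ "10.0.0.1" ],
        #         "location": { "elementId": "of:0000000000001000",
        #                       "port": "1" }
        #     }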
1687 # Strongly connected clusters of devices
1688 main.step( "Cluster view is consistent across ONOS nodes" )
1689 consistentClustersResult = main.TRUE
1690 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001691 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001692 if "Error" not in clusters[ controller ]:
1693 if clusters[ controller ] == clusters[ 0 ]:
1694 continue
1695 else: # clusters not consistent
1696 main.log.error( "clusters from ONOS" + controllerStr +
1697                                    " are inconsistent with ONOS1" )
1698 consistentClustersResult = main.FALSE
1699
1700 else:
1701 main.log.error( "Error in getting dataplane clusters " +
1702 "from ONOS" + controllerStr )
1703 consistentClustersResult = main.FALSE
1704 main.log.warn( "ONOS" + controllerStr +
1705 " clusters response: " +
1706 repr( clusters[ controller ] ) )
1707 utilities.assert_equals(
1708 expect=main.TRUE,
1709 actual=consistentClustersResult,
1710 onpass="Clusters view is consistent across all ONOS nodes",
1711 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001712 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001713 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001714
Jon Hall5cf14d52015-07-16 12:15:19 -07001715 # there should always only be one cluster
1716 main.step( "Cluster view correct across ONOS nodes" )
1717 try:
1718 numClusters = len( json.loads( clusters[ 0 ] ) )
1719 except ( ValueError, TypeError ):
1720 main.log.exception( "Error parsing clusters[0]: " +
1721 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001722 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001723 clusterResults = main.FALSE
1724 if numClusters == 1:
1725 clusterResults = main.TRUE
1726 utilities.assert_equals(
1727 expect=1,
1728 actual=numClusters,
1729 onpass="ONOS shows 1 SCC",
1730 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1731
1732 main.step( "Comparing ONOS topology to MN" )
1733 devicesResults = main.TRUE
1734 linksResults = main.TRUE
1735 hostsResults = main.TRUE
1736 mnSwitches = main.Mininet1.getSwitches()
1737 mnLinks = main.Mininet1.getLinks()
1738 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001739         for controller in range( len( main.activeNodes ) ):
1740 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001741 if devices[ controller ] and ports[ controller ] and\
1742 "Error" not in devices[ controller ] and\
1743 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001744 currentDevicesResult = main.Mininet1.compareSwitches(
1745 mnSwitches,
1746 json.loads( devices[ controller ] ),
1747 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001748 else:
1749 currentDevicesResult = main.FALSE
1750 utilities.assert_equals( expect=main.TRUE,
1751 actual=currentDevicesResult,
1752 onpass="ONOS" + controllerStr +
1753 " Switches view is correct",
1754 onfail="ONOS" + controllerStr +
1755 " Switches view is incorrect" )
1756 if links[ controller ] and "Error" not in links[ controller ]:
1757 currentLinksResult = main.Mininet1.compareLinks(
1758 mnSwitches, mnLinks,
1759 json.loads( links[ controller ] ) )
1760 else:
1761 currentLinksResult = main.FALSE
1762 utilities.assert_equals( expect=main.TRUE,
1763 actual=currentLinksResult,
1764 onpass="ONOS" + controllerStr +
1765 " links view is correct",
1766 onfail="ONOS" + controllerStr +
1767 " links view is incorrect" )
1768
Jon Hall657cdf62015-12-17 14:40:51 -08001769 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001770 currentHostsResult = main.Mininet1.compareHosts(
1771 mnHosts,
1772 hosts[ controller ] )
1773 else:
1774 currentHostsResult = main.FALSE
1775 utilities.assert_equals( expect=main.TRUE,
1776 actual=currentHostsResult,
1777 onpass="ONOS" + controllerStr +
1778 " hosts exist in Mininet",
1779 onfail="ONOS" + controllerStr +
1780 " hosts don't match Mininet" )
1781
1782 devicesResults = devicesResults and currentDevicesResult
1783 linksResults = linksResults and currentLinksResult
1784 hostsResults = hostsResults and currentHostsResult
1785
1786 main.step( "Device information is correct" )
1787 utilities.assert_equals(
1788 expect=main.TRUE,
1789 actual=devicesResults,
1790 onpass="Device information is correct",
1791 onfail="Device information is incorrect" )
1792
1793 main.step( "Links are correct" )
1794 utilities.assert_equals(
1795 expect=main.TRUE,
1796 actual=linksResults,
1797             onpass="Links are correct",
1798 onfail="Links are incorrect" )
1799
1800 main.step( "Hosts are correct" )
1801 utilities.assert_equals(
1802 expect=main.TRUE,
1803 actual=hostsResults,
1804 onpass="Hosts are correct",
1805 onfail="Hosts are incorrect" )
1806
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001807 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001808 """
1809         The failure-inducing case: kill a minority of the ONOS nodes.
1810 """
Jon Halle1a3b752015-07-22 13:02:46 -07001811 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001812 assert main, "main not defined"
1813 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001814 assert main.CLIs, "main.CLIs not defined"
1815 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001816 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001817
1818 main.step( "Checking ONOS Logs for errors" )
1819 for node in main.nodes:
1820 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1821 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1822
Jon Hall3b489db2015-10-05 14:38:37 -07001823 n = len( main.nodes ) # Number of nodes
1824 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1825         main.kill = [ 0 ] # ONOS nodes to kill, listed by index in main.nodes
1826 if n > 3:
1827 main.kill.append( p - 1 )
1828         # NOTE: This only works for cluster sizes of 3, 5, or 7.
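        # Worked examples of the selection above (Python 2 integer division):
        # n=3 -> p=3, main.kill=[ 0 ] (ONOS1 only)
        # n=5 -> p=4, main.kill=[ 0, 3 ] (ONOS1 and ONOS4)
        # n=7 -> p=5, main.kill=[ 0, 4 ] (ONOS1 and ONOS5)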
1829
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001830 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001831 killResults = main.TRUE
1832 for i in main.kill:
1833 killResults = killResults and\
1834 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001835 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001836 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001837 onpass="ONOS nodes killed successfully",
1838 onfail="ONOS nodes NOT successfully killed" )
1839
Jon Halld2871c22016-07-26 11:01:14 -07001840 main.step( "Checking ONOS nodes" )
1841 nodeResults = utilities.retry( main.HA.nodesCheck,
1842 False,
1843 args=[main.activeNodes],
1844 sleep=15,
1845 attempts=5 )
1846
1847 utilities.assert_equals( expect=True, actual=nodeResults,
1848 onpass="Nodes check successful",
1849 onfail="Nodes check NOT successful" )
1850
1851 if not nodeResults:
1852 for i in main.activeNodes:
1853 cli = main.CLIs[i]
1854 main.log.debug( "{} components not ACTIVE: \n{}".format(
1855 cli.name,
1856 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1857             main.log.error( "ONOS nodes are not healthy after the kill, stopping test" )
1858 main.cleanup()
1859 main.exit()
1860
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001861 def CASE62( self, main ):
1862 """
1863         Bring up the stopped nodes.
1864 """
1865 import time
1866 assert main.numCtrls, "main.numCtrls not defined"
1867 assert main, "main not defined"
1868 assert utilities.assert_equals, "utilities.assert_equals not defined"
1869 assert main.CLIs, "main.CLIs not defined"
1870 assert main.nodes, "main.nodes not defined"
1871 assert main.kill, "main.kill not defined"
1872 main.case( "Restart minority of ONOS nodes" )
1873
1874 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1875 startResults = main.TRUE
1876 restartTime = time.time()
1877 for i in main.kill:
1878 startResults = startResults and\
1879 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1880 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1881 onpass="ONOS nodes started successfully",
1882 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001883
1884 main.step( "Checking if ONOS is up yet" )
1885 count = 0
1886 onosIsupResult = main.FALSE
1887 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001888 onosIsupResult = main.TRUE
1889 for i in main.kill:
1890 onosIsupResult = onosIsupResult and\
1891 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001892 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001893 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1894 onpass="ONOS restarted successfully",
1895 onfail="ONOS restart NOT successful" )
1896
Jon Halle1a3b752015-07-22 13:02:46 -07001897 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001898 cliResults = main.TRUE
1899 for i in main.kill:
1900 cliResults = cliResults and\
1901 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001902 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001903 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1904 onpass="ONOS cli restarted",
1905 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001906 main.activeNodes.sort()
1907 try:
1908 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1909                 "List of active nodes has duplicates; this likely indicates something was run out of order"
1910 except AssertionError:
1911 main.log.exception( "" )
1912 main.cleanup()
1913 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001914
1915         # Grab the time of restart so we can check how long the gossip
1916 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001917 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001918 main.log.debug( "Restart time: " + str( main.restartTime ) )
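        # NOTE: main.restartTime covers everything from issuing onosStart()
        # through the isup() polling and CLI reconnection above, so it is an
        # upper bound on node recovery time; topology convergence after the
        # restart is measured separately in CASE8.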
Jon Hall5cf14d52015-07-16 12:15:19 -07001919         # TODO: Make this configurable. Also, we are breaking the above timer
Jon Halld2871c22016-07-26 11:01:14 -07001920 main.step( "Checking ONOS nodes" )
1921 nodeResults = utilities.retry( main.HA.nodesCheck,
1922 False,
1923 args=[main.activeNodes],
1924 sleep=15,
1925 attempts=5 )
1926
1927 utilities.assert_equals( expect=True, actual=nodeResults,
1928 onpass="Nodes check successful",
1929 onfail="Nodes check NOT successful" )
1930
1931 if not nodeResults:
1932 for i in main.activeNodes:
1933 cli = main.CLIs[i]
1934 main.log.debug( "{} components not ACTIVE: \n{}".format(
1935 cli.name,
1936 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1937 main.log.error( "Failed to start ONOS, stopping test" )
1938 main.cleanup()
1939 main.exit()
1940
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001941 node = main.activeNodes[0]
1942 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1943 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1944 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001945
Jon Halla440e872016-03-31 15:15:50 -07001946 main.step( "Rerun for election on the node(s) that were killed" )
1947 runResults = main.TRUE
1948 for i in main.kill:
1949 runResults = runResults and\
1950 main.CLIs[i].electionTestRun()
1951 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1952 onpass="ONOS nodes reran for election topic",
1953                                  onfail="Error rerunning for election" )
1954
Jon Hall5cf14d52015-07-16 12:15:19 -07001955 def CASE7( self, main ):
1956 """
1957 Check state after ONOS failure
1958 """
1959 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001960 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001961 assert main, "main not defined"
1962 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001963 assert main.CLIs, "main.CLIs not defined"
1964 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001965 try:
1966 main.kill
1967 except AttributeError:
1968 main.kill = []
1969
Jon Hall5cf14d52015-07-16 12:15:19 -07001970 main.case( "Running ONOS Constant State Tests" )
1971
1972 main.step( "Check that each switch has a master" )
1973 # Assert that each device has a master
1974 rolesNotNull = main.TRUE
1975 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001976 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001977 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001978 name="rolesNotNull-" + str( i ),
1979 args=[ ] )
1980 threads.append( t )
1981 t.start()
1982
1983 for t in threads:
1984 t.join()
1985 rolesNotNull = rolesNotNull and t.result
1986 utilities.assert_equals(
1987 expect=main.TRUE,
1988 actual=rolesNotNull,
1989 onpass="Each device has a master",
1990 onfail="Some devices don't have a master assigned" )
1991
1992 main.step( "Read device roles from ONOS" )
1993 ONOSMastership = []
Jon Halla440e872016-03-31 15:15:50 -07001994 mastershipCheck = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001995 consistentMastership = True
1996 rolesResults = True
1997 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001998 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001999 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07002000 name="roles-" + str( i ),
2001 args=[] )
2002 threads.append( t )
2003 t.start()
2004
2005 for t in threads:
2006 t.join()
2007 ONOSMastership.append( t.result )
2008
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002009 for i in range( len( ONOSMastership ) ):
2010 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002011 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002012 main.log.error( "Error in getting ONOS" + node + " roles" )
2013 main.log.warn( "ONOS" + node + " mastership response: " +
2014 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002015 rolesResults = False
2016 utilities.assert_equals(
2017 expect=True,
2018 actual=rolesResults,
2019 onpass="No error in reading roles output",
2020 onfail="Error in reading roles from ONOS" )
2021
2022 main.step( "Check for consistency in roles from each controller" )
2023 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
2024 main.log.info(
2025 "Switch roles are consistent across all ONOS nodes" )
2026 else:
2027 consistentMastership = False
2028 utilities.assert_equals(
2029 expect=True,
2030 actual=consistentMastership,
2031 onpass="Switch roles are consistent across all ONOS nodes",
2032 onfail="ONOS nodes have different views of switch roles" )
2033
2034 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002035 for i in range( len( ONOSMastership ) ):
2036 node = str( main.activeNodes[i] + 1 )
2037                 main.log.warn( "ONOS" + node + " roles: " +
2038 json.dumps( json.loads( ONOSMastership[ i ] ),
2039 sort_keys=True,
2040 indent=4,
2041 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07002042 elif rolesResults and consistentMastership:
2043 mastershipCheck = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002044
2045 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07002046
2047 main.step( "Get the intents and compare across all nodes" )
2048 ONOSIntents = []
2049 intentCheck = main.FALSE
2050 consistentIntents = True
2051 intentsResults = True
2052 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002053 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002054 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07002055 name="intents-" + str( i ),
2056 args=[],
2057 kwargs={ 'jsonFormat': True } )
2058 threads.append( t )
2059 t.start()
2060
2061 for t in threads:
2062 t.join()
2063 ONOSIntents.append( t.result )
2064
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002065         for i in range( len( ONOSIntents ) ):
2066 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002067 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002068 main.log.error( "Error in getting ONOS" + node + " intents" )
2069 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07002070 repr( ONOSIntents[ i ] ) )
2071 intentsResults = False
2072 utilities.assert_equals(
2073 expect=True,
2074 actual=intentsResults,
2075 onpass="No error in reading intents output",
2076 onfail="Error in reading intents from ONOS" )
2077
2078 main.step( "Check for consistency in Intents from each controller" )
2079 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2080 main.log.info( "Intents are consistent across all ONOS " +
2081 "nodes" )
2082 else:
2083 consistentIntents = False
2084
2085 # Try to make it easy to figure out what is happening
2086 #
2087 # Intent ONOS1 ONOS2 ...
2088 # 0x01 INSTALLED INSTALLING
2089 # ... ... ...
2090 # ... ... ...
2091 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002092 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07002093 title += " " * 10 + "ONOS" + str( n + 1 )
2094 main.log.warn( title )
2095 # get all intent keys in the cluster
2096 keys = []
2097 for nodeStr in ONOSIntents:
2098 node = json.loads( nodeStr )
2099 for intent in node:
2100 keys.append( intent.get( 'id' ) )
2101 keys = set( keys )
2102 for key in keys:
2103 row = "%-13s" % key
2104 for nodeStr in ONOSIntents:
2105 node = json.loads( nodeStr )
2106 for intent in node:
2107 if intent.get( 'id' ) == key:
2108 row += "%-15s" % intent.get( 'state' )
2109 main.log.warn( row )
2110 # End table view
2111
2112 utilities.assert_equals(
2113 expect=True,
2114 actual=consistentIntents,
2115 onpass="Intents are consistent across all ONOS nodes",
2116 onfail="ONOS nodes have different views of intents" )
2117 intentStates = []
2118 for node in ONOSIntents: # Iter through ONOS nodes
2119 nodeStates = []
2120 # Iter through intents of a node
2121 try:
2122 for intent in json.loads( node ):
2123 nodeStates.append( intent[ 'state' ] )
2124 except ( ValueError, TypeError ):
2125 main.log.exception( "Error in parsing intents" )
2126 main.log.error( repr( node ) )
2127 intentStates.append( nodeStates )
2128 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2129 main.log.info( dict( out ) )
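            # The dict logged above is a tally of intents by state,
            # e.g. {'INSTALLED': 24, 'WITHDRAWN': 1} (illustrative values).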
2130
2131 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002132 for i in range( len( main.activeNodes ) ):
2133 node = str( main.activeNodes[i] + 1 )
2134 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07002135 main.log.warn( json.dumps(
2136 json.loads( ONOSIntents[ i ] ),
2137 sort_keys=True,
2138 indent=4,
2139 separators=( ',', ': ' ) ) )
2140 elif intentsResults and consistentIntents:
2141 intentCheck = main.TRUE
2142
2143 # NOTE: Store has no durability, so intents are lost across system
2144 # restarts
2145 main.step( "Compare current intents with intents before the failure" )
2146 # NOTE: this requires case 5 to pass for intentState to be set.
2147 # maybe we should stop the test if that fails?
2148 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002149 try:
2150 intentState
2151 except NameError:
2152 main.log.warn( "No previous intent state was saved" )
2153 else:
2154 if intentState and intentState == ONOSIntents[ 0 ]:
2155 sameIntents = main.TRUE
2156 main.log.info( "Intents are consistent with before failure" )
2157 # TODO: possibly the states have changed? we may need to figure out
2158 # what the acceptable states are
2159 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2160 sameIntents = main.TRUE
2161 try:
2162 before = json.loads( intentState )
2163 after = json.loads( ONOSIntents[ 0 ] )
2164 for intent in before:
2165 if intent not in after:
2166 sameIntents = main.FALSE
2167 main.log.debug( "Intent is not currently in ONOS " +
2168 "(at least in the same form):" )
2169 main.log.debug( json.dumps( intent ) )
2170 except ( ValueError, TypeError ):
2171 main.log.exception( "Exception printing intents" )
2172 main.log.debug( repr( ONOSIntents[0] ) )
2173 main.log.debug( repr( intentState ) )
2174 if sameIntents == main.FALSE:
2175 try:
2176 main.log.debug( "ONOS intents before: " )
2177 main.log.debug( json.dumps( json.loads( intentState ),
2178 sort_keys=True, indent=4,
2179 separators=( ',', ': ' ) ) )
2180 main.log.debug( "Current ONOS intents: " )
2181 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2182 sort_keys=True, indent=4,
2183 separators=( ',', ': ' ) ) )
2184 except ( ValueError, TypeError ):
2185 main.log.exception( "Exception printing intents" )
2186 main.log.debug( repr( ONOSIntents[0] ) )
2187 main.log.debug( repr( intentState ) )
2188 utilities.assert_equals(
2189 expect=main.TRUE,
2190 actual=sameIntents,
2191 onpass="Intents are consistent with before failure",
2192 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002193 intentCheck = intentCheck and sameIntents
2194
2195 main.step( "Get the OF Table entries and compare to before " +
2196 "component failure" )
2197 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002198 for i in range( 28 ):
2199 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002200 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002201 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2202 FlowTables = FlowTables and curSwitch
2203 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002204 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002205 utilities.assert_equals(
2206 expect=main.TRUE,
2207 actual=FlowTables,
2208 onpass="No changes were found in the flow tables",
2209 onfail="Changes were found in the flow tables" )
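        # NOTE: the reference tables in the global 'flows' list were captured
        # in CASE5, before any nodes were killed, so this check verifies that
        # the control-plane failure did not disturb the flows already
        # installed in the switches.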
2210
2211 main.Mininet2.pingLongKill()
2212 '''
2213 main.step( "Check the continuous pings to ensure that no packets " +
2214 "were dropped during component failure" )
2215 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2216 main.params[ 'TESTONIP' ] )
2217 LossInPings = main.FALSE
2218 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2219 for i in range( 8, 18 ):
2220 main.log.info(
2221 "Checking for a loss in pings along flow from s" +
2222 str( i ) )
2223 LossInPings = main.Mininet2.checkForLoss(
2224 "/tmp/ping.h" +
2225 str( i ) ) or LossInPings
2226 if LossInPings == main.TRUE:
2227 main.log.info( "Loss in ping detected" )
2228 elif LossInPings == main.ERROR:
2229             main.log.info( "There are multiple mininet processes running" )
2230 elif LossInPings == main.FALSE:
2231 main.log.info( "No Loss in the pings" )
2232 main.log.info( "No loss of dataplane connectivity" )
2233 utilities.assert_equals(
2234 expect=main.FALSE,
2235 actual=LossInPings,
2236 onpass="No Loss of connectivity",
2237 onfail="Loss of dataplane connectivity detected" )
2238 '''
2239
2240 main.step( "Leadership Election is still functional" )
2241 # Test of LeadershipElection
2242 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002243
Jon Hall3b489db2015-10-05 14:38:37 -07002244 restarted = []
2245 for i in main.kill:
2246 restarted.append( main.nodes[i].ip_address )
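        # 'restarted' holds the IPs of the nodes that were killed and brought
        # back up; the checks below require that the current election leader
        # is not one of them, i.e. that leadership stayed with (or moved to)
        # a node that never went down.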
Jon Hall5cf14d52015-07-16 12:15:19 -07002247 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002248
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002249 for i in main.activeNodes:
2250 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002251 leaderN = cli.electionTestLeader()
2252 leaderList.append( leaderN )
2253 if leaderN == main.FALSE:
2254 # error in response
2255 main.log.error( "Something is wrong with " +
2256 "electionTestLeader function, check the" +
2257 " error logs" )
2258 leaderResult = main.FALSE
2259 elif leaderN is None:
2260 main.log.error( cli.name +
2261                                 " shows that no leader for the election-app was" +
2262 " elected after the old one died" )
2263 leaderResult = main.FALSE
2264 elif leaderN in restarted:
2265 main.log.error( cli.name + " shows " + str( leaderN ) +
2266 " as leader for the election-app, but it " +
2267 "was restarted" )
2268 leaderResult = main.FALSE
2269 if len( set( leaderList ) ) != 1:
2270 leaderResult = main.FALSE
2271 main.log.error(
2272 "Inconsistent view of leader for the election test app" )
2273 # TODO: print the list
2274 utilities.assert_equals(
2275 expect=main.TRUE,
2276 actual=leaderResult,
2277 onpass="Leadership election passed",
2278 onfail="Something went wrong with Leadership election" )
2279
2280 def CASE8( self, main ):
2281 """
2282 Compare topo
2283 """
2284 import json
2285 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002286 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002287 assert main, "main not defined"
2288 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002289 assert main.CLIs, "main.CLIs not defined"
2290 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002291
2292 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002293 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002294 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002295 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002296         topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002297 elapsed = 0
2298 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002299 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002300 startTime = time.time()
2301 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002302 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002303 devicesResults = main.TRUE
2304 linksResults = main.TRUE
2305 hostsResults = main.TRUE
2306 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002307 count += 1
2308 cliStart = time.time()
2309 devices = []
2310 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002311 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002312 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002313 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002314 args=[ main.CLIs[i].devices, [ None ] ],
2315 kwargs= { 'sleep': 5, 'attempts': 5,
2316 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002317 threads.append( t )
2318 t.start()
2319
2320 for t in threads:
2321 t.join()
2322 devices.append( t.result )
2323 hosts = []
2324 ipResult = main.TRUE
2325 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002326 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002327 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002328 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002329 args=[ main.CLIs[i].hosts, [ None ] ],
2330 kwargs= { 'sleep': 5, 'attempts': 5,
2331 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002332 threads.append( t )
2333 t.start()
2334
2335 for t in threads:
2336 t.join()
2337 try:
2338 hosts.append( json.loads( t.result ) )
2339 except ( ValueError, TypeError ):
2340 main.log.exception( "Error parsing hosts results" )
2341 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002342 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002343 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002344 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002345 if hosts[ controller ]:
2346 for host in hosts[ controller ]:
2347 if host is None or host.get( 'ipAddresses', [] ) == []:
2348 main.log.error(
2349 "Error with host ipAddresses on controller" +
2350 controllerStr + ": " + str( host ) )
2351 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002352 ports = []
2353 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002354 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002355 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002356 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002357 args=[ main.CLIs[i].ports, [ None ] ],
2358 kwargs= { 'sleep': 5, 'attempts': 5,
2359 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002360 threads.append( t )
2361 t.start()
2362
2363 for t in threads:
2364 t.join()
2365 ports.append( t.result )
2366 links = []
2367 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002368 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002369 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002370 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002371 args=[ main.CLIs[i].links, [ None ] ],
2372 kwargs= { 'sleep': 5, 'attempts': 5,
2373 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002374 threads.append( t )
2375 t.start()
2376
2377 for t in threads:
2378 t.join()
2379 links.append( t.result )
2380 clusters = []
2381 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002382 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002383 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002384 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002385 args=[ main.CLIs[i].clusters, [ None ] ],
2386 kwargs= { 'sleep': 5, 'attempts': 5,
2387 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002388 threads.append( t )
2389 t.start()
2390
2391 for t in threads:
2392 t.join()
2393 clusters.append( t.result )
2394
2395 elapsed = time.time() - startTime
2396 cliTime = time.time() - cliStart
2397             main.log.info( "Elapsed time: " + str( elapsed ) )
2398             main.log.info( "CLI time: " + str( cliTime ) )
2399
Jon Hall6e709752016-02-01 13:38:46 -08002400 if all( e is None for e in devices ) and\
2401 all( e is None for e in hosts ) and\
2402 all( e is None for e in ports ) and\
2403 all( e is None for e in links ) and\
2404 all( e is None for e in clusters ):
2405 topoFailMsg = "Could not get topology from ONOS"
2406 main.log.error( topoFailMsg )
2407 continue # Try again, No use trying to compare
2408
Jon Hall5cf14d52015-07-16 12:15:19 -07002409 mnSwitches = main.Mininet1.getSwitches()
2410 mnLinks = main.Mininet1.getLinks()
2411 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002412 for controller in range( len( main.activeNodes ) ):
2413 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002414 if devices[ controller ] and ports[ controller ] and\
2415 "Error" not in devices[ controller ] and\
2416 "Error" not in ports[ controller ]:
2417
Jon Hallc6793552016-01-19 14:18:37 -08002418 try:
2419 currentDevicesResult = main.Mininet1.compareSwitches(
2420 mnSwitches,
2421 json.loads( devices[ controller ] ),
2422 json.loads( ports[ controller ] ) )
2423 except ( TypeError, ValueError ) as e:
2424 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2425 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002426 else:
2427 currentDevicesResult = main.FALSE
2428 utilities.assert_equals( expect=main.TRUE,
2429 actual=currentDevicesResult,
2430 onpass="ONOS" + controllerStr +
2431 " Switches view is correct",
2432 onfail="ONOS" + controllerStr +
2433 " Switches view is incorrect" )
2434
2435 if links[ controller ] and "Error" not in links[ controller ]:
2436 currentLinksResult = main.Mininet1.compareLinks(
2437 mnSwitches, mnLinks,
2438 json.loads( links[ controller ] ) )
2439 else:
2440 currentLinksResult = main.FALSE
2441 utilities.assert_equals( expect=main.TRUE,
2442 actual=currentLinksResult,
2443 onpass="ONOS" + controllerStr +
2444 " links view is correct",
2445 onfail="ONOS" + controllerStr +
2446 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002447 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002448 currentHostsResult = main.Mininet1.compareHosts(
2449 mnHosts,
2450 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002451 elif hosts[ controller ] == []:
2452 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002453 else:
2454 currentHostsResult = main.FALSE
2455 utilities.assert_equals( expect=main.TRUE,
2456 actual=currentHostsResult,
2457 onpass="ONOS" + controllerStr +
2458 " hosts exist in Mininet",
2459 onfail="ONOS" + controllerStr +
2460 " hosts don't match Mininet" )
2461 # CHECKING HOST ATTACHMENT POINTS
2462 hostAttachment = True
2463 zeroHosts = False
2464 # FIXME: topo-HA/obelisk specific mappings:
2465 # key is mac and value is dpid
2466 mappings = {}
2467 for i in range( 1, 29 ): # hosts 1 through 28
2468 # set up correct variables:
2469 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2470 if i == 1:
2471 deviceId = "1000".zfill(16)
2472 elif i == 2:
2473 deviceId = "2000".zfill(16)
2474 elif i == 3:
2475 deviceId = "3000".zfill(16)
2476 elif i == 4:
2477 deviceId = "3004".zfill(16)
2478 elif i == 5:
2479 deviceId = "5000".zfill(16)
2480 elif i == 6:
2481 deviceId = "6000".zfill(16)
2482 elif i == 7:
2483 deviceId = "6007".zfill(16)
2484 elif i >= 8 and i <= 17:
2485 dpid = '3' + str( i ).zfill( 3 )
2486 deviceId = dpid.zfill(16)
2487 elif i >= 18 and i <= 27:
2488 dpid = '6' + str( i ).zfill( 3 )
2489 deviceId = dpid.zfill(16)
2490 elif i == 28:
2491 deviceId = "2800".zfill(16)
2492 mappings[ macId ] = deviceId
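                # Example of the mapping built above (illustrative): for i=8,
                # macId is "00:00:00:00:00:08" and deviceId is
                # "0000000000003008" (dpid "3008" zero-filled to 16 chars).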
Jon Halld8f6de82015-12-17 17:04:34 -08002493 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002494 if hosts[ controller ] == []:
2495 main.log.warn( "There are no hosts discovered" )
2496 zeroHosts = True
2497 else:
2498 for host in hosts[ controller ]:
2499 mac = None
2500 location = None
2501 device = None
2502 port = None
2503 try:
2504 mac = host.get( 'mac' )
2505 assert mac, "mac field could not be found for this host object"
2506
2507 location = host.get( 'location' )
2508 assert location, "location field could not be found for this host object"
2509
2510 # Trim the protocol identifier off deviceId
2511 device = str( location.get( 'elementId' ) ).split(':')[1]
2512 assert device, "elementId field could not be found for this host location object"
2513
2514 port = location.get( 'port' )
2515 assert port, "port field could not be found for this host location object"
2516
2517 # Now check if this matches where they should be
2518 if mac and device and port:
2519 if str( port ) != "1":
2520 main.log.error( "The attachment port is incorrect for " +
2521 "host " + str( mac ) +
2522 ". Expected: 1 Actual: " + str( port) )
2523 hostAttachment = False
2524 if device != mappings[ str( mac ) ]:
2525 main.log.error( "The attachment device is incorrect for " +
2526 "host " + str( mac ) +
2527 ". Expected: " + mappings[ str( mac ) ] +
2528 " Actual: " + device )
2529 hostAttachment = False
2530 else:
2531 hostAttachment = False
2532 except AssertionError:
2533 main.log.exception( "Json object not as expected" )
2534 main.log.error( repr( host ) )
2535 hostAttachment = False
2536 else:
2537 main.log.error( "No hosts json output or \"Error\"" +
2538 " in output. hosts = " +
2539 repr( hosts[ controller ] ) )
2540 if zeroHosts is False:
2541 hostAttachment = True
2542
2543 # END CHECKING HOST ATTACHMENT POINTS
2544 devicesResults = devicesResults and currentDevicesResult
2545 linksResults = linksResults and currentLinksResult
2546 hostsResults = hostsResults and currentHostsResult
2547 hostAttachmentResults = hostAttachmentResults and\
2548 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002549 topoResult = ( devicesResults and linksResults
2550 and hostsResults and ipResult and
2551 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002552 utilities.assert_equals( expect=True,
2553 actual=topoResult,
2554 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002555 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002556 # End of While loop to pull ONOS state
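        # The loop above keeps retrying until the topologies match, or until
        # both limits are exceeded (at least 60 seconds elapsed AND at least
        # 3 attempts), since the two limits are joined with 'or' in the loop
        # condition.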
Jon Hall5cf14d52015-07-16 12:15:19 -07002557
2558 # Compare json objects for hosts and dataplane clusters
2559
2560 # hosts
2561 main.step( "Hosts view is consistent across all ONOS nodes" )
2562 consistentHostsResult = main.TRUE
2563 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002564 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002565 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002566 if hosts[ controller ] == hosts[ 0 ]:
2567 continue
2568 else: # hosts not consistent
2569 main.log.error( "hosts from ONOS" + controllerStr +
2570                                     " are inconsistent with ONOS1" )
2571 main.log.warn( repr( hosts[ controller ] ) )
2572 consistentHostsResult = main.FALSE
2573
2574 else:
2575 main.log.error( "Error in getting ONOS hosts from ONOS" +
2576 controllerStr )
2577 consistentHostsResult = main.FALSE
2578 main.log.warn( "ONOS" + controllerStr +
2579 " hosts response: " +
2580 repr( hosts[ controller ] ) )
2581 utilities.assert_equals(
2582 expect=main.TRUE,
2583 actual=consistentHostsResult,
2584 onpass="Hosts view is consistent across all ONOS nodes",
2585 onfail="ONOS nodes have different views of hosts" )
2586
2587 main.step( "Hosts information is correct" )
2588 hostsResults = hostsResults and ipResult
2589 utilities.assert_equals(
2590 expect=main.TRUE,
2591 actual=hostsResults,
2592 onpass="Host information is correct",
2593 onfail="Host information is incorrect" )
2594
2595 main.step( "Host attachment points to the network" )
2596 utilities.assert_equals(
2597 expect=True,
2598 actual=hostAttachmentResults,
2599 onpass="Hosts are correctly attached to the network",
2600 onfail="ONOS did not correctly attach hosts to the network" )
2601
2602 # Strongly connected clusters of devices
2603 main.step( "Clusters view is consistent across all ONOS nodes" )
2604 consistentClustersResult = main.TRUE
2605 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002606 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002607 if "Error" not in clusters[ controller ]:
2608 if clusters[ controller ] == clusters[ 0 ]:
2609 continue
2610 else: # clusters not consistent
2611 main.log.error( "clusters from ONOS" +
2612 controllerStr +
2613                                     " are inconsistent with ONOS1" )
2614 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002615 else:
2616 main.log.error( "Error in getting dataplane clusters " +
2617 "from ONOS" + controllerStr )
2618 consistentClustersResult = main.FALSE
2619 main.log.warn( "ONOS" + controllerStr +
2620 " clusters response: " +
2621 repr( clusters[ controller ] ) )
2622 utilities.assert_equals(
2623 expect=main.TRUE,
2624 actual=consistentClustersResult,
2625 onpass="Clusters view is consistent across all ONOS nodes",
2626 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002627 if not consistentClustersResult:
2628 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07002629
2630 main.step( "There is only one SCC" )
2631 # there should always only be one cluster
2632 try:
2633 numClusters = len( json.loads( clusters[ 0 ] ) )
2634 except ( ValueError, TypeError ):
2635 main.log.exception( "Error parsing clusters[0]: " +
2636 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002637 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002638 clusterResults = main.FALSE
2639 if numClusters == 1:
2640 clusterResults = main.TRUE
2641 utilities.assert_equals(
2642 expect=1,
2643 actual=numClusters,
2644 onpass="ONOS shows 1 SCC",
2645 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2646
2647 topoResult = ( devicesResults and linksResults
2648 and hostsResults and consistentHostsResult
2649 and consistentClustersResult and clusterResults
2650 and ipResult and hostAttachmentResults )
2651
2652 topoResult = topoResult and int( count <= 2 )
2653 note = "note it takes about " + str( int( cliTime ) ) + \
2654 " seconds for the test to make all the cli calls to fetch " +\
2655 "the topology from each ONOS instance"
2656 main.log.info(
2657 "Very crass estimate for topology discovery/convergence( " +
2658 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2659 str( count ) + " tries" )
2660
2661 main.step( "Device information is correct" )
2662 utilities.assert_equals(
2663 expect=main.TRUE,
2664 actual=devicesResults,
2665 onpass="Device information is correct",
2666 onfail="Device information is incorrect" )
2667
2668 main.step( "Links are correct" )
2669 utilities.assert_equals(
2670 expect=main.TRUE,
2671 actual=linksResults,
2672             onpass="Links are correct",
2673 onfail="Links are incorrect" )
2674
Jon Halla440e872016-03-31 15:15:50 -07002675 main.step( "Hosts are correct" )
2676 utilities.assert_equals(
2677 expect=main.TRUE,
2678 actual=hostsResults,
2679 onpass="Hosts are correct",
2680 onfail="Hosts are incorrect" )
2681
Jon Hall5cf14d52015-07-16 12:15:19 -07002682 # FIXME: move this to an ONOS state case
2683 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002684 nodeResults = utilities.retry( main.HA.nodesCheck,
2685 False,
2686 args=[main.activeNodes],
2687 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002688
Jon Hall41d39f12016-04-11 22:54:35 -07002689 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002690 onpass="Nodes check successful",
2691 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002692 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002693 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002694 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002695 main.CLIs[i].name,
2696 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002697
Jon Halld2871c22016-07-26 11:01:14 -07002698 if not topoResult:
2699 main.cleanup()
2700 main.exit()
2701
Jon Hall5cf14d52015-07-16 12:15:19 -07002702 def CASE9( self, main ):
2703 """
2704 Link s3-s28 down
2705 """
2706 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002707 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002708 assert main, "main not defined"
2709 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002710 assert main.CLIs, "main.CLIs not defined"
2711 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 # NOTE: You should probably run a topology check after this
2713
2714 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2715
2716 description = "Turn off a link to ensure that Link Discovery " +\
2717 "is working properly"
2718 main.case( description )
2719
2720 main.step( "Kill Link between s3 and s28" )
2721 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2722 main.log.info( "Waiting " + str( linkSleep ) +
2723 " seconds for link down to be discovered" )
2724 time.sleep( linkSleep )
2725 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2726 onpass="Link down successful",
2727 onfail="Failed to bring link down" )
2728 # TODO do some sort of check here
2729
2730 def CASE10( self, main ):
2731 """
2732 Link s3-s28 up
2733 """
2734 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002735 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002736 assert main, "main not defined"
2737 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002738 assert main.CLIs, "main.CLIs not defined"
2739 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002740 # NOTE: You should probably run a topology check after this
2741
2742 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2743
2744 description = "Restore a link to ensure that Link Discovery is " + \
2745 "working properly"
2746 main.case( description )
2747
2748 main.step( "Bring link between s3 and s28 back up" )
2749 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2750 main.log.info( "Waiting " + str( linkSleep ) +
2751 " seconds for link up to be discovered" )
2752 time.sleep( linkSleep )
2753 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2754 onpass="Link up successful",
2755 onfail="Failed to bring link up" )
2756 # TODO do some sort of check here
2757
2758 def CASE11( self, main ):
2759 """
2760 Switch Down
2761 """
2762 # NOTE: You should probably run a topology check after this
2763 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002764 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002765 assert main, "main not defined"
2766 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002767 assert main.CLIs, "main.CLIs not defined"
2768 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002769
2770 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2771
2772 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002773 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002774 main.case( description )
2775 switch = main.params[ 'kill' ][ 'switch' ]
2776 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2777
2778 # TODO: Make this switch parameterizable
2779 main.step( "Kill " + switch )
2780 main.log.info( "Deleting " + switch )
2781 main.Mininet1.delSwitch( switch )
2782 main.log.info( "Waiting " + str( switchSleep ) +
2783 " seconds for switch down to be discovered" )
2784 time.sleep( switchSleep )
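 # ONOS keeps a record of the deleted switch in its device store but should
 # now flag it as unavailable, so 'available' == False is the expected state.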
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002785 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002786 # Peek at the deleted switch
2787 main.log.warn( str( device ) )
2788 result = main.FALSE
2789 if device and device[ 'available' ] is False:
2790 result = main.TRUE
2791 utilities.assert_equals( expect=main.TRUE, actual=result,
2792 onpass="Kill switch successful",
2793 onfail="Failed to kill switch?" )
2794
2795 def CASE12( self, main ):
2796 """
2797 Switch Up
2798 """
2799 # NOTE: You should probably run a topology check after this
2800 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002801 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002802 assert main, "main not defined"
2803 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002804 assert main.CLIs, "main.CLIs not defined"
2805 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002806 assert ONOS1Port, "ONOS1Port not defined"
2807 assert ONOS2Port, "ONOS2Port not defined"
2808 assert ONOS3Port, "ONOS3Port not defined"
2809 assert ONOS4Port, "ONOS4Port not defined"
2810 assert ONOS5Port, "ONOS5Port not defined"
2811 assert ONOS6Port, "ONOS6Port not defined"
2812 assert ONOS7Port, "ONOS7Port not defined"
2813
2814 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2815 switch = main.params[ 'kill' ][ 'switch' ]
2816 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2817 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002818 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002819 description = "Adding a switch to ensure it is discovered correctly"
2820 main.case( description )
2821
2822 main.step( "Add back " + switch )
2823 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2824 for peer in links:
2825 main.Mininet1.addLink( switch, peer )
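 # Point the re-added switch at every ONOS node so the cluster can
 # re-establish mastership and rediscover the device and its links.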
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002826 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002827 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2828 main.log.info( "Waiting " + str( switchSleep ) +
2829 " seconds for switch up to be discovered" )
2830 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002831 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002832 # Peek at the re-added switch
2833 main.log.warn( str( device ) )
2834 result = main.FALSE
2835 if device and device[ 'available' ]:
2836 result = main.TRUE
2837 utilities.assert_equals( expect=main.TRUE, actual=result,
2838 onpass="add switch successful",
2839 onfail="Failed to add switch?" )
2840
2841 def CASE13( self, main ):
2842 """
2843 Clean up
2844 """
2845 import os
2846 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002847 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002848 assert main, "main not defined"
2849 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002850 assert main.CLIs, "main.CLIs not defined"
2851 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002852
2853 # printing colors to terminal
2854 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2855 'blue': '\033[94m', 'green': '\033[92m',
2856 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2857 main.case( "Test Cleanup" )
2858 main.step( "Killing tcpdumps" )
2859 main.Mininet2.stopTcpdump()
2860
2861 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002862 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002863 main.step( "Copying MN pcap and ONOS log files to test station" )
2864 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2865 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002866 # NOTE: MN Pcap file is being saved to logdir.
2867 # We scp this file as MN and TestON aren't necessarily the same vm
2868
2869 # FIXME: To be replaced with a Jenkins post-build script
Jon Hall5cf14d52015-07-16 12:15:19 -07002870 # TODO: Load these from params
2871 # NOTE: must end in /
2872 logFolder = "/opt/onos/log/"
2873 logFiles = [ "karaf.log", "karaf.log.1" ]
2874 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002875 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002876 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002877 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002878 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2879 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002880 # std*.log's
2881 # NOTE: must end in /
2882 logFolder = "/opt/onos/var/"
2883 logFiles = [ "stderr.log", "stdout.log" ]
2884 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002885 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002886 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002887 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002888 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2889 logFolder + f, dstName )
2890 else:
2891 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002892
2893 main.step( "Stopping Mininet" )
2894 mnResult = main.Mininet1.stopNet()
2895 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2896 onpass="Mininet stopped",
2897 onfail="MN cleanup NOT successful" )
2898
2899 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002900 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002901 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2902 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002903
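 # Write the timing measurements gathered earlier (intent gossip time and
 # node restart time) to Timers.csv for the Jenkins plots; the NameError
 # guard covers runs where those values were never set.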
2904 try:
2905 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2906 # Overwrite the file with the collected timer values, then close it
2907 labels = "Gossip Intents, Restart"
2908 data = str( gossipTime ) + ", " + str( main.restartTime )
2909 timerLog.write( labels + "\n" + data )
2910 timerLog.close()
2911 except NameError, e:
2912 main.log.exception(e)
2913
2914 def CASE14( self, main ):
2915 """
2916 start election app on all onos nodes
2917 """
 import time  # needed for the sleep after running for election
Jon Halle1a3b752015-07-22 13:02:46 -07002918 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002919 assert main, "main not defined"
2920 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002921 assert main.CLIs, "main.CLIs not defined"
2922 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002923
2924 main.case("Start Leadership Election app")
2925 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002926 onosCli = main.CLIs[ main.activeNodes[0] ]
2927 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002928 utilities.assert_equals(
2929 expect=main.TRUE,
2930 actual=appResult,
2931 onpass="Election app installed",
2932 onfail="Something went wrong with installing Leadership election" )
2933
2934 main.step( "Run for election on each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002935 for i in main.activeNodes:
2936 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002937 time.sleep(5)
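 # consistentLeaderboards() collects the election leaderboard from every
 # active node and returns ( allMatch, boards ); based on how it is indexed
 # below, each board appears to be an ordered candidate list with the
 # current leader first, e.g. [ '10.128.30.11', '10.128.30.12', ... ]
 # (IPs here are illustrative only).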
2938 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2939 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002940 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002941 expect=True,
2942 actual=sameResult,
2943 onpass="All nodes see the same leaderboards",
2944 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002945
Jon Hall25463a82016-04-13 14:03:52 -07002946 if sameResult:
2947 leader = leaders[ 0 ][ 0 ]
2948 if main.nodes[main.activeNodes[0]].ip_address in leader:
2949 correctLeader = True
2950 else:
2951 correctLeader = False
2952 main.step( "First node was elected leader" )
2953 utilities.assert_equals(
2954 expect=True,
2955 actual=correctLeader,
2956 onpass="Correct leader was elected",
2957 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002958
2959 def CASE15( self, main ):
2960 """
2961 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002962 15.1 Run election on each node
2963 15.2 Check that each node has the same leaders and candidates
2964 15.3 Find current leader and withdraw
2965 15.4 Check that a new node was elected leader
2966 15.5 Check that the new leader was the candidate of the old leader
2967 15.6 Run for election on old leader
2968 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2969 15.8 Make sure that the old leader was added to the candidate list
2970
2971 old and new variable prefixes refer to data from before vs. after the
2972 withdrawal, and later to before the withdrawal vs. after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002973 """
2974 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002975 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002976 assert main, "main not defined"
2977 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002978 assert main.CLIs, "main.CLIs not defined"
2979 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002980
Jon Hall5cf14d52015-07-16 12:15:19 -07002981 description = "Check that Leadership Election is still functional"
2982 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002983 # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002984
Jon Halla440e872016-03-31 15:15:50 -07002985 oldLeaders = [] # list of each node's candidate list before the withdrawal
2986 newLeaders = [] # list of each node's candidate list after the withdrawal
acsmars71adceb2015-08-31 15:09:26 -07002987 oldLeader = '' # the old leader from oldLeaders, None if not same
2988 newLeader = '' # the new leader from newLeaders, None if not same
2989 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2990 expectNoLeader = False # True when there is only one leader
2991 if main.numCtrls == 1:
2992 expectNoLeader = True
2993
2994 main.step( "Run for election on each node" )
2995 electionResult = main.TRUE
2996
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002997 for i in main.activeNodes: # run test election on each node
2998 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002999 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003000 utilities.assert_equals(
3001 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003002 actual=electionResult,
3003 onpass="All nodes successfully ran for leadership",
3004 onfail="At least one node failed to run for leadership" )
3005
acsmars3a72bde2015-09-02 14:16:22 -07003006 if electionResult == main.FALSE:
3007 main.log.error(
3008 "Skipping Test Case because Election Test App isn't loaded" )
3009 main.skipCase()
3010
acsmars71adceb2015-08-31 15:09:26 -07003011 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07003012 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07003013 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07003014 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07003015 if sameResult:
3016 oldLeader = oldLeaders[ 0 ][ 0 ]
3017 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07003018 else:
Jon Halla440e872016-03-31 15:15:50 -07003019 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07003020 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003021 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003022 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07003023 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07003024 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07003025
3026 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07003027 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07003028 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07003029 if oldLeader is None:
3030 main.log.error( "Leadership isn't consistent." )
3031 withdrawResult = main.FALSE
3032 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003033 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07003034 if oldLeader == main.nodes[ i ].ip_address:
3035 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003036 break
3037 else: # FOR/ELSE statement
3038 main.log.error( "Leader election, could not find current leader" )
3039 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07003040 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07003041 utilities.assert_equals(
3042 expect=main.TRUE,
3043 actual=withdrawResult,
3044 onpass="Node was withdrawn from election",
3045 onfail="Node was not withdrawn from election" )
3046
acsmars71adceb2015-08-31 15:09:26 -07003047 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07003048 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07003049 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07003050 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07003051 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07003052 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07003053 if newLeaders[ 0 ][ 0 ] == 'none':
3054 main.log.error( "No leader was elected on at least 1 node" )
3055 if not expectNoLeader:
3056 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07003057 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07003058
3059 # Check that the new leader is not the older leader, which was withdrawn
3060 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003061 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003062 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07003063 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003064 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003065 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003066 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003067 onpass="Leadership election passed",
3068 onfail="Something went wrong with Leadership election" )
3069
Jon Halla440e872016-03-31 15:15:50 -07003070 main.step( "Check that the new leader was the candidate of the old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003071 # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07003072 correctCandidateResult = main.TRUE
3073 if expectNoLeader:
3074 if newLeader == 'none':
3075 main.log.info( "No leader expected. None found. Pass" )
3076 correctCandidateResult = main.TRUE
3077 else:
3078 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3079 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003080 elif len( oldLeaders[0] ) >= 3:
3081 if newLeader == oldLeaders[ 0 ][ 2 ]:
3082 # correct leader was elected
3083 correctCandidateResult = main.TRUE
3084 else:
3085 correctCandidateResult = main.FALSE
3086 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3087 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003088 else:
3089 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003090 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003091 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003092 utilities.assert_equals(
3093 expect=main.TRUE,
3094 actual=correctCandidateResult,
3095 onpass="Correct Candidate Elected",
3096 onfail="Incorrect Candidate Elected" )
3097
Jon Hall5cf14d52015-07-16 12:15:19 -07003098 main.step( "Run for election on old leader ( just so everyone " +
3099 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003100 if oldLeaderCLI is not None:
3101 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003102 else:
acsmars71adceb2015-08-31 15:09:26 -07003103 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003104 runResult = main.FALSE
3105 utilities.assert_equals(
3106 expect=main.TRUE,
3107 actual=runResult,
3108 onpass="App re-ran for election",
3109 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003110
acsmars71adceb2015-08-31 15:09:26 -07003111 main.step(
3112 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003113 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003114 # Get new leaders and candidates
3115 reRunLeaders = []
3116 time.sleep( 5 ) # TODO: parameterize this sleep length
Jon Hall41d39f12016-04-11 22:54:35 -07003117 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003118
3119 # Check that the re-elected node is last on the candidate list
Jon Hall3a7843a2016-04-12 03:01:09 -07003120 if not reRunLeaders[0]:
3121 positionResult = main.FALSE
3122 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07003123 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
3124 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003125 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003126 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003127 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003128 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003129 onpass="Old leader successfully re-ran for election",
3130 onfail="Something went wrong with Leadership election after " +
3131 "the old leader re-ran for election" )
3132
3133 def CASE16( self, main ):
3134 """
3135 Install Distributed Primitives app
3136 """
3137 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003138 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003139 assert main, "main not defined"
3140 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003141 assert main.CLIs, "main.CLIs not defined"
3142 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003143
3144 # Variables for the distributed primitives tests
3145 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003146 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003147 global onosSet
3148 global onosSetName
3149 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003150 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003151 onosSet = set([])
3152 onosSetName = "TestON-set"
3153
3154 description = "Install Primitives app"
3155 main.case( description )
3156 main.step( "Install Primitives app" )
3157 appName = "org.onosproject.distributedprimitives"
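 # The distributed primitives app exposes the counter-test and set-test
 # CLI commands that CASE17 exercises below.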
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003158 node = main.activeNodes[0]
3159 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003160 utilities.assert_equals( expect=main.TRUE,
3161 actual=appResults,
3162 onpass="Primitives app activated",
3163 onfail="Primitives app not activated" )
3164 time.sleep( 5 ) # To allow all nodes to activate
3165
3166 def CASE17( self, main ):
3167 """
3168 Check for basic functionality with distributed primitives
3169 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003170 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003171 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003172 assert main, "main not defined"
3173 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003174 assert main.CLIs, "main.CLIs not defined"
3175 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003176 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003177 assert onosSetName, "onosSetName not defined"
3178 # NOTE: assert fails if value is 0/None/Empty/False
3179 try:
3180 pCounterValue
3181 except NameError:
3182 main.log.error( "pCounterValue not defined, setting to 0" )
3183 pCounterValue = 0
3184 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003185 onosSet
3186 except NameError:
3187 main.log.error( "onosSet not defined, setting to empty Set" )
3188 onosSet = set([])
3189 # Variables for the distributed primitives tests. These are local only
3190 addValue = "a"
3191 addAllValue = "a b c d e f"
3192 retainValue = "c d e f"
3193
3194 description = "Check for basic functionality with distributed " +\
3195 "primitives"
3196 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003197 main.caseExplanation = "Test the methods of the distributed " +\
3198 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003199 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003200 # Partitioned counters
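 # Pattern for every counter step below: pCounterValue tracks the expected
 # value locally, addedPValues records the value each threaded CLI call
 # should return, and the collected results are then checked for membership.
 # For example, three addAndGet( delta=1 ) calls starting from 0 should
 # yield addedPValues == [ 1, 2, 3 ].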
3201 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003202 pCounters = []
3203 threads = []
3204 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003205 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003206 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3207 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003208 args=[ pCounterName ] )
3209 pCounterValue += 1
3210 addedPValues.append( pCounterValue )
3211 threads.append( t )
3212 t.start()
3213
3214 for t in threads:
3215 t.join()
3216 pCounters.append( t.result )
3217 # Check that counter incremented numController times
3218 pCounterResults = True
3219 for i in addedPValues:
3220 tmpResult = i in pCounters
3221 pCounterResults = pCounterResults and tmpResult
3222 if not tmpResult:
3223 main.log.error( str( i ) + " is not in partitioned "
3224 "counter incremented results" )
3225 utilities.assert_equals( expect=True,
3226 actual=pCounterResults,
3227 onpass="Default counter incremented",
3228 onfail="Error incrementing default" +
3229 " counter" )
3230
Jon Halle1a3b752015-07-22 13:02:46 -07003231 main.step( "Get then Increment a default counter on each node" )
3232 pCounters = []
3233 threads = []
3234 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003235 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003236 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3237 name="counterGetAndAdd-" + str( i ),
3238 args=[ pCounterName ] )
3239 addedPValues.append( pCounterValue )
3240 pCounterValue += 1
3241 threads.append( t )
3242 t.start()
3243
3244 for t in threads:
3245 t.join()
3246 pCounters.append( t.result )
3247 # Check that counter incremented numController times
3248 pCounterResults = True
3249 for i in addedPValues:
3250 tmpResult = i in pCounters
3251 pCounterResults = pCounterResults and tmpResult
3252 if not tmpResult:
3253 main.log.error( str( i ) + " is not in partitioned "
3254 "counter incremented results" )
3255 utilities.assert_equals( expect=True,
3256 actual=pCounterResults,
3257 onpass="Default counter incremented",
3258 onfail="Error incrementing default" +
3259 " counter" )
3260
3261 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003262 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003263 utilities.assert_equals( expect=main.TRUE,
3264 actual=incrementCheck,
3265 onpass="Added counters are correct",
3266 onfail="Added counters are incorrect" )
3267
3268 main.step( "Add -8 to then get a default counter on each node" )
3269 pCounters = []
3270 threads = []
3271 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003272 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003273 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3274 name="counterIncrement-" + str( i ),
3275 args=[ pCounterName ],
3276 kwargs={ "delta": -8 } )
3277 pCounterValue += -8
3278 addedPValues.append( pCounterValue )
3279 threads.append( t )
3280 t.start()
3281
3282 for t in threads:
3283 t.join()
3284 pCounters.append( t.result )
3285 # Check that counter incremented numController times
3286 pCounterResults = True
3287 for i in addedPValues:
3288 tmpResult = i in pCounters
3289 pCounterResults = pCounterResults and tmpResult
3290 if not tmpResult:
3291 main.log.error( str( i ) + " is not in partitioned "
3292 "counter incremented results" )
3293 utilities.assert_equals( expect=True,
3294 actual=pCounterResults,
3295 onpass="Default counter incremented",
3296 onfail="Error incrementing default" +
3297 " counter" )
3298
3299 main.step( "Add 5 to then get a default counter on each node" )
3300 pCounters = []
3301 threads = []
3302 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003303 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003304 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3305 name="counterIncrement-" + str( i ),
3306 args=[ pCounterName ],
3307 kwargs={ "delta": 5 } )
3308 pCounterValue += 5
3309 addedPValues.append( pCounterValue )
3310 threads.append( t )
3311 t.start()
3312
3313 for t in threads:
3314 t.join()
3315 pCounters.append( t.result )
3316 # Check that counter incremented numController times
3317 pCounterResults = True
3318 for i in addedPValues:
3319 tmpResult = i in pCounters
3320 pCounterResults = pCounterResults and tmpResult
3321 if not tmpResult:
3322 main.log.error( str( i ) + " is not in partitioned "
3323 "counter incremented results" )
3324 utilities.assert_equals( expect=True,
3325 actual=pCounterResults,
3326 onpass="Default counter incremented",
3327 onfail="Error incrementing default" +
3328 " counter" )
3329
3330 main.step( "Get then add 5 to a default counter on each node" )
3331 pCounters = []
3332 threads = []
3333 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003334 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003335 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3336 name="counterIncrement-" + str( i ),
3337 args=[ pCounterName ],
3338 kwargs={ "delta": 5 } )
3339 addedPValues.append( pCounterValue )
3340 pCounterValue += 5
3341 threads.append( t )
3342 t.start()
3343
3344 for t in threads:
3345 t.join()
3346 pCounters.append( t.result )
3347 # Check that counter incremented numController times
3348 pCounterResults = True
3349 for i in addedPValues:
3350 tmpResult = i in pCounters
3351 pCounterResults = pCounterResults and tmpResult
3352 if not tmpResult:
3353 main.log.error( str( i ) + " is not in partitioned "
3354 "counter incremented results" )
3355 utilities.assert_equals( expect=True,
3356 actual=pCounterResults,
3357 onpass="Default counter incremented",
3358 onfail="Error incrementing default" +
3359 " counter" )
3360
3361 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003362 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003363 utilities.assert_equals( expect=main.TRUE,
3364 actual=incrementCheck,
3365 onpass="Added counters are correct",
3366 onfail="Added counters are incorrect" )
3367
Jon Hall5cf14d52015-07-16 12:15:19 -07003368 # DISTRIBUTED SETS
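 # onosSet mirrors the expected contents of the distributed set: every
 # mutation below is applied to this local copy first, then each node's
 # get() and size() output is compared against it.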
3369 main.step( "Distributed Set get" )
3370 size = len( onosSet )
3371 getResponses = []
3372 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003373 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003374 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003375 name="setTestGet-" + str( i ),
3376 args=[ onosSetName ] )
3377 threads.append( t )
3378 t.start()
3379 for t in threads:
3380 t.join()
3381 getResponses.append( t.result )
3382
3383 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003384 for i in range( len( main.activeNodes ) ):
3385 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003386 if isinstance( getResponses[ i ], list):
3387 current = set( getResponses[ i ] )
3388 if len( current ) == len( getResponses[ i ] ):
3389 # no repeats
3390 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003391 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003392 " has incorrect view" +
3393 " of set " + onosSetName + ":\n" +
3394 str( getResponses[ i ] ) )
3395 main.log.debug( "Expected: " + str( onosSet ) )
3396 main.log.debug( "Actual: " + str( current ) )
3397 getResults = main.FALSE
3398 else:
3399 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003400 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003401 " has repeat elements in" +
3402 " set " + onosSetName + ":\n" +
3403 str( getResponses[ i ] ) )
3404 getResults = main.FALSE
3405 elif getResponses[ i ] == main.ERROR:
3406 getResults = main.FALSE
3407 utilities.assert_equals( expect=main.TRUE,
3408 actual=getResults,
3409 onpass="Set elements are correct",
3410 onfail="Set elements are incorrect" )
3411
3412 main.step( "Distributed Set size" )
3413 sizeResponses = []
3414 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003415 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003416 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003417 name="setTestSize-" + str( i ),
3418 args=[ onosSetName ] )
3419 threads.append( t )
3420 t.start()
3421 for t in threads:
3422 t.join()
3423 sizeResponses.append( t.result )
3424
3425 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003426 for i in range( len( main.activeNodes ) ):
3427 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003428 if size != sizeResponses[ i ]:
3429 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003430 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003431 " expected a size of " + str( size ) +
3432 " for set " + onosSetName +
3433 " but got " + str( sizeResponses[ i ] ) )
3434 utilities.assert_equals( expect=main.TRUE,
3435 actual=sizeResults,
3436 onpass="Set sizes are correct",
3437 onfail="Set sizes are incorrect" )
3438
3439 main.step( "Distributed Set add()" )
3440 onosSet.add( addValue )
3441 addResponses = []
3442 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003443 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003444 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003445 name="setTestAdd-" + str( i ),
3446 args=[ onosSetName, addValue ] )
3447 threads.append( t )
3448 t.start()
3449 for t in threads:
3450 t.join()
3451 addResponses.append( t.result )
3452
3453 # main.TRUE = successfully changed the set
3454 # main.FALSE = action resulted in no change in set
3455 # main.ERROR - Some error in executing the function
3456 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003457 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003458 if addResponses[ i ] == main.TRUE:
3459 # All is well
3460 pass
3461 elif addResponses[ i ] == main.FALSE:
3462 # Already in set, probably fine
3463 pass
3464 elif addResponses[ i ] == main.ERROR:
3465 # Error in execution
3466 addResults = main.FALSE
3467 else:
3468 # unexpected result
3469 addResults = main.FALSE
3470 if addResults != main.TRUE:
3471 main.log.error( "Error executing set add" )
3472
3473 # Check if set is still correct
3474 size = len( onosSet )
3475 getResponses = []
3476 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003477 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003478 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003479 name="setTestGet-" + str( i ),
3480 args=[ onosSetName ] )
3481 threads.append( t )
3482 t.start()
3483 for t in threads:
3484 t.join()
3485 getResponses.append( t.result )
3486 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003487 for i in range( len( main.activeNodes ) ):
3488 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003489 if isinstance( getResponses[ i ], list):
3490 current = set( getResponses[ i ] )
3491 if len( current ) == len( getResponses[ i ] ):
3492 # no repeats
3493 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003494 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003495 " of set " + onosSetName + ":\n" +
3496 str( getResponses[ i ] ) )
3497 main.log.debug( "Expected: " + str( onosSet ) )
3498 main.log.debug( "Actual: " + str( current ) )
3499 getResults = main.FALSE
3500 else:
3501 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003502 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003503 " set " + onosSetName + ":\n" +
3504 str( getResponses[ i ] ) )
3505 getResults = main.FALSE
3506 elif getResponses[ i ] == main.ERROR:
3507 getResults = main.FALSE
3508 sizeResponses = []
3509 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003510 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003511 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003512 name="setTestSize-" + str( i ),
3513 args=[ onosSetName ] )
3514 threads.append( t )
3515 t.start()
3516 for t in threads:
3517 t.join()
3518 sizeResponses.append( t.result )
3519 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003520 for i in range( len( main.activeNodes ) ):
3521 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003522 if size != sizeResponses[ i ]:
3523 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003524 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003525 " expected a size of " + str( size ) +
3526 " for set " + onosSetName +
3527 " but got " + str( sizeResponses[ i ] ) )
3528 addResults = addResults and getResults and sizeResults
3529 utilities.assert_equals( expect=main.TRUE,
3530 actual=addResults,
3531 onpass="Set add correct",
3532 onfail="Set add was incorrect" )
3533
3534 main.step( "Distributed Set addAll()" )
3535 onosSet.update( addAllValue.split() )
3536 addResponses = []
3537 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003538 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003539 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003540 name="setTestAddAll-" + str( i ),
3541 args=[ onosSetName, addAllValue ] )
3542 threads.append( t )
3543 t.start()
3544 for t in threads:
3545 t.join()
3546 addResponses.append( t.result )
3547
3548 # main.TRUE = successfully changed the set
3549 # main.FALSE = action resulted in no change in set
3550 # main.ERROR - Some error in executing the function
3551 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003552 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003553 if addResponses[ i ] == main.TRUE:
3554 # All is well
3555 pass
3556 elif addResponses[ i ] == main.FALSE:
3557 # Already in set, probably fine
3558 pass
3559 elif addResponses[ i ] == main.ERROR:
3560 # Error in execution
3561 addAllResults = main.FALSE
3562 else:
3563 # unexpected result
3564 addAllResults = main.FALSE
3565 if addAllResults != main.TRUE:
3566 main.log.error( "Error executing set addAll" )
3567
3568 # Check if set is still correct
3569 size = len( onosSet )
3570 getResponses = []
3571 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003572 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003573 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003574 name="setTestGet-" + str( i ),
3575 args=[ onosSetName ] )
3576 threads.append( t )
3577 t.start()
3578 for t in threads:
3579 t.join()
3580 getResponses.append( t.result )
3581 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003582 for i in range( len( main.activeNodes ) ):
3583 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003584 if isinstance( getResponses[ i ], list):
3585 current = set( getResponses[ i ] )
3586 if len( current ) == len( getResponses[ i ] ):
3587 # no repeats
3588 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003589 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003590 " has incorrect view" +
3591 " of set " + onosSetName + ":\n" +
3592 str( getResponses[ i ] ) )
3593 main.log.debug( "Expected: " + str( onosSet ) )
3594 main.log.debug( "Actual: " + str( current ) )
3595 getResults = main.FALSE
3596 else:
3597 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003598 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003599 " has repeat elements in" +
3600 " set " + onosSetName + ":\n" +
3601 str( getResponses[ i ] ) )
3602 getResults = main.FALSE
3603 elif getResponses[ i ] == main.ERROR:
3604 getResults = main.FALSE
3605 sizeResponses = []
3606 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003607 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003608 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003609 name="setTestSize-" + str( i ),
3610 args=[ onosSetName ] )
3611 threads.append( t )
3612 t.start()
3613 for t in threads:
3614 t.join()
3615 sizeResponses.append( t.result )
3616 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003617 for i in range( len( main.activeNodes ) ):
3618 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003619 if size != sizeResponses[ i ]:
3620 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003621 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003622 " expected a size of " + str( size ) +
3623 " for set " + onosSetName +
3624 " but got " + str( sizeResponses[ i ] ) )
3625 addAllResults = addAllResults and getResults and sizeResults
3626 utilities.assert_equals( expect=main.TRUE,
3627 actual=addAllResults,
3628 onpass="Set addAll correct",
3629 onfail="Set addAll was incorrect" )
3630
3631 main.step( "Distributed Set contains()" )
3632 containsResponses = []
3633 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003634 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003635 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003636 name="setContains-" + str( i ),
3637 args=[ onosSetName ],
3638 kwargs={ "values": addValue } )
3639 threads.append( t )
3640 t.start()
3641 for t in threads:
3642 t.join()
3643 # NOTE: This is the tuple
3644 containsResponses.append( t.result )
3645
3646 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003647 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003648 if containsResponses[ i ] == main.ERROR:
3649 containsResults = main.FALSE
3650 else:
3651 containsResults = containsResults and\
3652 containsResponses[ i ][ 1 ]
3653 utilities.assert_equals( expect=main.TRUE,
3654 actual=containsResults,
3655 onpass="Set contains is functional",
3656 onfail="Set contains failed" )
3657
3658 main.step( "Distributed Set containsAll()" )
3659 containsAllResponses = []
3660 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003661 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003662 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003663 name="setContainsAll-" + str( i ),
3664 args=[ onosSetName ],
3665 kwargs={ "values": addAllValue } )
3666 threads.append( t )
3667 t.start()
3668 for t in threads:
3669 t.join()
3670 # NOTE: This is the tuple
3671 containsAllResponses.append( t.result )
3672
3673 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003674 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003675 if containsResponses[ i ] == main.ERROR:
3676 containsResults = main.FALSE
3677 else:
3678 containsResults = containsResults and\
3679 containsResponses[ i ][ 1 ]
3680 utilities.assert_equals( expect=main.TRUE,
3681 actual=containsAllResults,
3682 onpass="Set containsAll is functional",
3683 onfail="Set containsAll failed" )
3684
3685 main.step( "Distributed Set remove()" )
3686 onosSet.remove( addValue )
3687 removeResponses = []
3688 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003689 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003690 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003691 name="setTestRemove-" + str( i ),
3692 args=[ onosSetName, addValue ] )
3693 threads.append( t )
3694 t.start()
3695 for t in threads:
3696 t.join()
3697 removeResponses.append( t.result )
3698
3699 # main.TRUE = successfully changed the set
3700 # main.FALSE = action resulted in no change in set
3701 # main.ERROR - Some error in executing the function
3702 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003703 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003704 if removeResponses[ i ] == main.TRUE:
3705 # All is well
3706 pass
3707 elif removeResponses[ i ] == main.FALSE:
3708 # not in set, probably fine
3709 pass
3710 elif removeResponses[ i ] == main.ERROR:
3711 # Error in execution
3712 removeResults = main.FALSE
3713 else:
3714 # unexpected result
3715 removeResults = main.FALSE
3716 if removeResults != main.TRUE:
3717 main.log.error( "Error executing set remove" )
3718
3719 # Check if set is still correct
3720 size = len( onosSet )
3721 getResponses = []
3722 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003723 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003724 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003725 name="setTestGet-" + str( i ),
3726 args=[ onosSetName ] )
3727 threads.append( t )
3728 t.start()
3729 for t in threads:
3730 t.join()
3731 getResponses.append( t.result )
3732 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003733 for i in range( len( main.activeNodes ) ):
3734 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003735 if isinstance( getResponses[ i ], list):
3736 current = set( getResponses[ i ] )
3737 if len( current ) == len( getResponses[ i ] ):
3738 # no repeats
3739 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003740 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003741 " has incorrect view" +
3742 " of set " + onosSetName + ":\n" +
3743 str( getResponses[ i ] ) )
3744 main.log.debug( "Expected: " + str( onosSet ) )
3745 main.log.debug( "Actual: " + str( current ) )
3746 getResults = main.FALSE
3747 else:
3748 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003749 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003750 " has repeat elements in" +
3751 " set " + onosSetName + ":\n" +
3752 str( getResponses[ i ] ) )
3753 getResults = main.FALSE
3754 elif getResponses[ i ] == main.ERROR:
3755 getResults = main.FALSE
3756 sizeResponses = []
3757 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003758 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003759 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003760 name="setTestSize-" + str( i ),
3761 args=[ onosSetName ] )
3762 threads.append( t )
3763 t.start()
3764 for t in threads:
3765 t.join()
3766 sizeResponses.append( t.result )
3767 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003768 for i in range( len( main.activeNodes ) ):
3769 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003770 if size != sizeResponses[ i ]:
3771 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003772 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003773 " expected a size of " + str( size ) +
3774 " for set " + onosSetName +
3775 " but got " + str( sizeResponses[ i ] ) )
3776 removeResults = removeResults and getResults and sizeResults
3777 utilities.assert_equals( expect=main.TRUE,
3778 actual=removeResults,
3779 onpass="Set remove correct",
3780 onfail="Set remove was incorrect" )
3781
3782 main.step( "Distributed Set removeAll()" )
3783 onosSet.difference_update( addAllValue.split() )
3784 removeAllResponses = []
3785 threads = []
3786 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003787 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003788 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003789 name="setTestRemoveAll-" + str( i ),
3790 args=[ onosSetName, addAllValue ] )
3791 threads.append( t )
3792 t.start()
3793 for t in threads:
3794 t.join()
3795 removeAllResponses.append( t.result )
3796 except Exception, e:
3797 main.log.exception(e)
3798
3799 # main.TRUE = successfully changed the set
3800 # main.FALSE = action resulted in no change in set
3801 # main.ERROR - Some error in executing the function
3802 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003803 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003804 if removeAllResponses[ i ] == main.TRUE:
3805 # All is well
3806 pass
3807 elif removeAllResponses[ i ] == main.FALSE:
3808 # not in set, probably fine
3809 pass
3810 elif removeAllResponses[ i ] == main.ERROR:
3811 # Error in execution
3812 removeAllResults = main.FALSE
3813 else:
3814 # unexpected result
3815 removeAllResults = main.FALSE
3816 if removeAllResults != main.TRUE:
3817 main.log.error( "Error executing set removeAll" )
3818
3819 # Check if set is still correct
3820 size = len( onosSet )
3821 getResponses = []
3822 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003823 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003824 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003825 name="setTestGet-" + str( i ),
3826 args=[ onosSetName ] )
3827 threads.append( t )
3828 t.start()
3829 for t in threads:
3830 t.join()
3831 getResponses.append( t.result )
3832 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003833 for i in range( len( main.activeNodes ) ):
3834 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003835 if isinstance( getResponses[ i ], list):
3836 current = set( getResponses[ i ] )
3837 if len( current ) == len( getResponses[ i ] ):
3838 # no repeats
3839 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003840 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003841 " has incorrect view" +
3842 " of set " + onosSetName + ":\n" +
3843 str( getResponses[ i ] ) )
3844 main.log.debug( "Expected: " + str( onosSet ) )
3845 main.log.debug( "Actual: " + str( current ) )
3846 getResults = main.FALSE
3847 else:
3848 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003849 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003850 " has repeat elements in" +
3851 " set " + onosSetName + ":\n" +
3852 str( getResponses[ i ] ) )
3853 getResults = main.FALSE
3854 elif getResponses[ i ] == main.ERROR:
3855 getResults = main.FALSE
3856 sizeResponses = []
3857 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003858 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003859 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003860 name="setTestSize-" + str( i ),
3861 args=[ onosSetName ] )
3862 threads.append( t )
3863 t.start()
3864 for t in threads:
3865 t.join()
3866 sizeResponses.append( t.result )
3867 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003868 for i in range( len( main.activeNodes ) ):
3869 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003870 if size != sizeResponses[ i ]:
3871 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003872 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003873 " expected a size of " + str( size ) +
3874 " for set " + onosSetName +
3875 " but got " + str( sizeResponses[ i ] ) )
3876 removeAllResults = removeAllResults and getResults and sizeResults
3877 utilities.assert_equals( expect=main.TRUE,
3878 actual=removeAllResults,
3879 onpass="Set removeAll correct",
3880 onfail="Set removeAll was incorrect" )
3881
3882 main.step( "Distributed Set addAll()" )
3883 onosSet.update( addAllValue.split() )
3884 addResponses = []
3885 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003886 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003887 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003888 name="setTestAddAll-" + str( i ),
3889 args=[ onosSetName, addAllValue ] )
3890 threads.append( t )
3891 t.start()
3892 for t in threads:
3893 t.join()
3894 addResponses.append( t.result )
3895
3896 # main.TRUE = successfully changed the set
3897 # main.FALSE = action resulted in no change in set
3898 # main.ERROR - Some error in executing the function
3899 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003900 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003901 if addResponses[ i ] == main.TRUE:
3902 # All is well
3903 pass
3904 elif addResponses[ i ] == main.FALSE:
3905 # Already in set, probably fine
3906 pass
3907 elif addResponses[ i ] == main.ERROR:
3908 # Error in execution
3909 addAllResults = main.FALSE
3910 else:
3911 # unexpected result
3912 addAllResults = main.FALSE
3913 if addAllResults != main.TRUE:
3914 main.log.error( "Error executing set addAll" )
3915
3916 # Check if set is still correct
3917 size = len( onosSet )
3918 getResponses = []
3919 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003920 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003921 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003922 name="setTestGet-" + str( i ),
3923 args=[ onosSetName ] )
3924 threads.append( t )
3925 t.start()
3926 for t in threads:
3927 t.join()
3928 getResponses.append( t.result )
3929 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003930 for i in range( len( main.activeNodes ) ):
3931 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003932 if isinstance( getResponses[ i ], list):
3933 current = set( getResponses[ i ] )
3934 if len( current ) == len( getResponses[ i ] ):
3935 # no repeats
3936 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003937 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003938 " has incorrect view" +
3939 " of set " + onosSetName + ":\n" +
3940 str( getResponses[ i ] ) )
3941 main.log.debug( "Expected: " + str( onosSet ) )
3942 main.log.debug( "Actual: " + str( current ) )
3943 getResults = main.FALSE
3944 else:
3945 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003946 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003947 " has repeat elements in" +
3948 " set " + onosSetName + ":\n" +
3949 str( getResponses[ i ] ) )
3950 getResults = main.FALSE
3951 elif getResponses[ i ] == main.ERROR:
3952 getResults = main.FALSE
3953 sizeResponses = []
3954 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003955 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003956 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003957 name="setTestSize-" + str( i ),
3958 args=[ onosSetName ] )
3959 threads.append( t )
3960 t.start()
3961 for t in threads:
3962 t.join()
3963 sizeResponses.append( t.result )
3964 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003965 for i in range( len( main.activeNodes ) ):
3966 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003967 if size != sizeResponses[ i ]:
3968 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003969 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003970 " expected a size of " + str( size ) +
3971 " for set " + onosSetName +
3972 " but got " + str( sizeResponses[ i ] ) )
3973 addAllResults = addAllResults and getResults and sizeResults
3974 utilities.assert_equals( expect=main.TRUE,
3975 actual=addAllResults,
3976 onpass="Set addAll correct",
3977 onfail="Set addAll was incorrect" )
3978
3979 main.step( "Distributed Set clear()" )
3980 onosSet.clear()
3981 clearResponses = []
3982 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003983 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003984 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003985 name="setTestClear-" + str( i ),
3986 args=[ onosSetName, " "], # Values doesn't matter
3987 kwargs={ "clear": True } )
3988 threads.append( t )
3989 t.start()
3990 for t in threads:
3991 t.join()
3992 clearResponses.append( t.result )
3993
3994 # main.TRUE = successfully changed the set
3995 # main.FALSE = action resulted in no change in set
3996 # main.ERROR - Some error in executing the function
3997 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003998 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003999 if clearResponses[ i ] == main.TRUE:
4000 # All is well
4001 pass
4002 elif clearResponses[ i ] == main.FALSE:
4003 # Nothing set, probably fine
4004 pass
4005 elif clearResponses[ i ] == main.ERROR:
4006 # Error in execution
4007 clearResults = main.FALSE
4008 else:
4009 # unexpected result
4010 clearResults = main.FALSE
4011 if clearResults != main.TRUE:
4012 main.log.error( "Error executing set clear" )
4013
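        # Verification pattern used after every mutation: read the set contents
        # and the set size from each active node in parallel and compare both
        # against the local Python reference set.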
        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

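        # Re-populate the now-empty set with addAll() so the retain() step
        # below has elements to work on, then verify contents and size on
        # every node.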
4077 main.step( "Distributed Set addAll()" )
4078 onosSet.update( addAllValue.split() )
4079 addResponses = []
4080 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004081 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004082 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004083 name="setTestAddAll-" + str( i ),
4084 args=[ onosSetName, addAllValue ] )
4085 threads.append( t )
4086 t.start()
4087 for t in threads:
4088 t.join()
4089 addResponses.append( t.result )
4090
4091 # main.TRUE = successfully changed the set
4092 # main.FALSE = action resulted in no change in set
4093 # main.ERROR - Some error in executing the function
4094 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004095 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004096 if addResponses[ i ] == main.TRUE:
4097 # All is well
4098 pass
4099 elif addResponses[ i ] == main.FALSE:
4100 # Already in set, probably fine
4101 pass
4102 elif addResponses[ i ] == main.ERROR:
4103 # Error in execution
4104 addAllResults = main.FALSE
4105 else:
4106 # unexpected result
4107 addAllResults = main.FALSE
4108 if addAllResults != main.TRUE:
4109 main.log.error( "Error executing set addAll" )
4110
4111 # Check if set is still correct
4112 size = len( onosSet )
4113 getResponses = []
4114 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004115 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004116 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004117 name="setTestGet-" + str( i ),
4118 args=[ onosSetName ] )
4119 threads.append( t )
4120 t.start()
4121 for t in threads:
4122 t.join()
4123 getResponses.append( t.result )
4124 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004125 for i in range( len( main.activeNodes ) ):
4126 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004127 if isinstance( getResponses[ i ], list):
4128 current = set( getResponses[ i ] )
4129 if len( current ) == len( getResponses[ i ] ):
4130 # no repeats
4131 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004132 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004133 " has incorrect view" +
4134 " of set " + onosSetName + ":\n" +
4135 str( getResponses[ i ] ) )
4136 main.log.debug( "Expected: " + str( onosSet ) )
4137 main.log.debug( "Actual: " + str( current ) )
4138 getResults = main.FALSE
4139 else:
4140 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004141 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004142 " has repeat elements in" +
4143 " set " + onosSetName + ":\n" +
4144 str( getResponses[ i ] ) )
4145 getResults = main.FALSE
4146 elif getResponses[ i ] == main.ERROR:
4147 getResults = main.FALSE
4148 sizeResponses = []
4149 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004150 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004151 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004152 name="setTestSize-" + str( i ),
4153 args=[ onosSetName ] )
4154 threads.append( t )
4155 t.start()
4156 for t in threads:
4157 t.join()
4158 sizeResponses.append( t.result )
4159 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004160 for i in range( len( main.activeNodes ) ):
4161 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004162 if size != sizeResponses[ i ]:
4163 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004164 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004165 " expected a size of " + str( size ) +
4166 " for set " + onosSetName +
4167 " but got " + str( sizeResponses[ i ] ) )
4168 addAllResults = addAllResults and getResults and sizeResults
4169 utilities.assert_equals( expect=main.TRUE,
4170 actual=addAllResults,
4171 onpass="Set addAll correct",
4172 onfail="Set addAll was incorrect" )
4173
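        # retain() keeps only the elements that also appear in retainValue
        # (a set intersection) and discards the rest; afterwards every node
        # should converge on the same reduced set.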
4174 main.step( "Distributed Set retain()" )
4175 onosSet.intersection_update( retainValue.split() )
4176 retainResponses = []
4177 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004178 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004179 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004180 name="setTestRetain-" + str( i ),
4181 args=[ onosSetName, retainValue ],
4182 kwargs={ "retain": True } )
4183 threads.append( t )
4184 t.start()
4185 for t in threads:
4186 t.join()
4187 retainResponses.append( t.result )
4188
4189 # main.TRUE = successfully changed the set
4190 # main.FALSE = action resulted in no change in set
4191 # main.ERROR - Some error in executing the function
4192 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004193 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004194 if retainResponses[ i ] == main.TRUE:
4195 # All is well
4196 pass
4197 elif retainResponses[ i ] == main.FALSE:
4198 # Already in set, probably fine
4199 pass
4200 elif retainResponses[ i ] == main.ERROR:
4201 # Error in execution
4202 retainResults = main.FALSE
4203 else:
4204 # unexpected result
4205 retainResults = main.FALSE
4206 if retainResults != main.TRUE:
4207 main.log.error( "Error executing set retain" )
4208
4209 # Check if set is still correct
4210 size = len( onosSet )
4211 getResponses = []
4212 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004213 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004214 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004215 name="setTestGet-" + str( i ),
4216 args=[ onosSetName ] )
4217 threads.append( t )
4218 t.start()
4219 for t in threads:
4220 t.join()
4221 getResponses.append( t.result )
4222 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004223 for i in range( len( main.activeNodes ) ):
4224 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004225 if isinstance( getResponses[ i ], list):
4226 current = set( getResponses[ i ] )
4227 if len( current ) == len( getResponses[ i ] ):
4228 # no repeats
4229 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004230 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004231 " has incorrect view" +
4232 " of set " + onosSetName + ":\n" +
4233 str( getResponses[ i ] ) )
4234 main.log.debug( "Expected: " + str( onosSet ) )
4235 main.log.debug( "Actual: " + str( current ) )
4236 getResults = main.FALSE
4237 else:
4238 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004239 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004240 " has repeat elements in" +
4241 " set " + onosSetName + ":\n" +
4242 str( getResponses[ i ] ) )
4243 getResults = main.FALSE
4244 elif getResponses[ i ] == main.ERROR:
4245 getResults = main.FALSE
4246 sizeResponses = []
4247 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004248 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004249 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004250 name="setTestSize-" + str( i ),
4251 args=[ onosSetName ] )
4252 threads.append( t )
4253 t.start()
4254 for t in threads:
4255 t.join()
4256 sizeResponses.append( t.result )
4257 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004258 for i in range( len( main.activeNodes ) ):
4259 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004260 if size != sizeResponses[ i ]:
4261 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004262 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004263 str( size ) + " for set " + onosSetName +
4264 " but got " + str( sizeResponses[ i ] ) )
4265 retainResults = retainResults and getResults and sizeResults
4266 utilities.assert_equals( expect=main.TRUE,
4267 actual=retainResults,
4268 onpass="Set retain correct",
4269 onfail="Set retain was incorrect" )
4270
        # Transactional maps
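        # Write numKeys entries into a partitioned transactional map from a
        # single node, then read every key back from each active node to check
        # that all nodes report the same value.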
4272 main.step( "Partitioned Transactional maps put" )
4273 tMapValue = "Testing"
4274 numKeys = 100
4275 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004276 node = main.activeNodes[0]
4277 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall6e709752016-02-01 13:38:46 -08004278 if putResponses and len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

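        # Read each key back from every active node in parallel; all responses
        # should match the value written above.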
4291 main.step( "Partitioned Transactional maps get" )
Jon Hall9bfadd22016-05-11 14:48:07 -07004292 # FIXME: is this sleep needed?
4293 time.sleep( 5 )
4294
Jon Hall2a5002c2015-08-21 16:49:11 -07004295 getCheck = True
4296 for n in range( 1, numKeys + 1 ):
4297 getResponses = []
4298 threads = []
4299 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004300 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004301 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4302 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004303 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004304 threads.append( t )
4305 t.start()
4306 for t in threads:
4307 t.join()
4308 getResponses.append( t.result )
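            # Every response for this key should equal the value written by put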
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )