"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAkillNodes:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )
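        # NOTE: obelisk.py is assumed here to define the fixed 28-switch test
        #       topology ( s1-s28 ) that CASE2 and CASE21 later assign to
        #       controllers.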

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )

        main.step( "Make sure ONOS service doesn't automatically respawn" )
        handle = main.ONOSbench.handle
        handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
        handle.expect( "\$" )  # $ from the command
        handle.sendline( "sed -i -e 's/^Restart=always/Restart=no/g' tools/package/init/onos.service" )
        handle.expect( "\$" )  # $ from the command
        handle.expect( "\$" )  # $ from the prompt
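        # Disabling respawn/restart in the packaged init scripts keeps killed
        # ONOS nodes down during the failure cases instead of letting the init
        # system bring them straight back up; the edits are reverted in the
        # "Clean up ONOS service changes" step below.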

        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAkillNodes"
        plotName = "Plot-HA"
        index = "1"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( "Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for node in main.nodes:
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Clean up ONOS service changes" )
        handle.sendline( "git checkout -- tools/package/init/onos.conf" )
        handle.sendline( "git checkout -- tools/package/init/onos.service" )
        handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )
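        # Each switch is pointed at the full list of controller IPs; the check
        # below only verifies that every ONOS node appears in each switch's
        # controller list, while per-device mastership is handled by ONOS.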

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

559 def CASE3( self, main ):
560 """
561 Assign intents
562 """
563 import time
564 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700565 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700566 assert main, "main not defined"
567 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700568 assert main.CLIs, "main.CLIs not defined"
569 assert main.nodes, "main.nodes not defined"
Jon Halla440e872016-03-31 15:15:50 -0700570 try:
571 labels
572 except NameError:
573 main.log.error( "labels not defined, setting to []" )
574 labels = []
575 try:
576 data
577 except NameError:
578 main.log.error( "data not defined, setting to []" )
579 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700580 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700581 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700582 "assign predetermined host-to-host intents." +\
583 " After installation, check that the intent" +\
584 " is distributed to all nodes and the state" +\
585 " is INSTALLED"
586
587 # install onos-app-fwd
588 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700589 onosCli = main.CLIs[ main.activeNodes[0] ]
590 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700591 utilities.assert_equals( expect=main.TRUE, actual=installResults,
592 onpass="Install fwd successful",
593 onfail="Install fwd failed" )
594
595 main.step( "Check app ids" )
596 appCheck = main.TRUE
597 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700598 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700599 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700600 name="appToIDCheck-" + str( i ),
601 args=[] )
602 threads.append( t )
603 t.start()
604
605 for t in threads:
606 t.join()
607 appCheck = appCheck and t.result
608 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700609 main.log.warn( onosCli.apps() )
610 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700611 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
612 onpass="App Ids seem to be correct",
613 onfail="Something is wrong with app Ids" )
614
615 main.step( "Discovering Hosts( Via pingall for now )" )
616 # FIXME: Once we have a host discovery mechanism, use that instead
617 # REACTIVE FWD test
618 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700619 passMsg = "Reactive Pingall test passed"
620 time1 = time.time()
621 pingResult = main.Mininet1.pingall()
622 time2 = time.time()
623 if not pingResult:
624 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700625 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700626 passMsg += " on the second try"
627 utilities.assert_equals(
628 expect=main.TRUE,
629 actual=pingResult,
630 onpass= passMsg,
631 onfail="Reactive Pingall failed, " +
632 "one or more ping pairs failed" )
633 main.log.info( "Time for pingall: %2f seconds" %
634 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700635 # timeout for fwd flows
636 time.sleep( 11 )
637 # uninstall onos-app-fwd
638 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700639 node = main.activeNodes[0]
640 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700641 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
642 onpass="Uninstall fwd successful",
643 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
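        # Poll every active node ( up to 100 times, one second apart ) until
        # all submitted intent IDs are present and INSTALLED everywhere, then
        # compare the measured dispersion time against the gossip period from
        # the params file multiplied by the number of active nodes.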
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        main.log.debug( onosCli.flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

1345 main.step( "Get the intents from each controller" )
1346 global intentState
1347 intentState = []
1348 ONOSIntents = []
1349 intentCheck = main.FALSE
1350 consistentIntents = True
1351 intentsResults = True
1352 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001353 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001354 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001355 name="intents-" + str( i ),
1356 args=[],
1357 kwargs={ 'jsonFormat': True } )
1358 threads.append( t )
1359 t.start()
1360
1361 for t in threads:
1362 t.join()
1363 ONOSIntents.append( t.result )
1364
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001365 for i in range( len( ONOSIntents ) ):
1366 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001367 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001368 main.log.error( "Error in getting ONOS" + node + " intents" )
1369 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001370 repr( ONOSIntents[ i ] ) )
1371 intentsResults = False
1372 utilities.assert_equals(
1373 expect=True,
1374 actual=intentsResults,
1375 onpass="No error in reading intents output",
1376 onfail="Error in reading intents from ONOS" )
1377
1378 main.step( "Check for consistency in Intents from each controller" )
1379 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1380 main.log.info( "Intents are consistent across all ONOS " +
1381 "nodes" )
1382 else:
1383 consistentIntents = False
1384 main.log.error( "Intents not consistent" )
1385 utilities.assert_equals(
1386 expect=True,
1387 actual=consistentIntents,
1388 onpass="Intents are consistent across all ONOS nodes",
1389 onfail="ONOS nodes have different views of intents" )
1390
1391 if intentsResults:
1392 # Try to make it easy to figure out what is happening
1393 #
1394 # Intent ONOS1 ONOS2 ...
1395 # 0x01 INSTALLED INSTALLING
1396 # ... ... ...
1397 # ... ... ...
1398 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001399 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001400 title += " " * 10 + "ONOS" + str( n + 1 )
1401 main.log.warn( title )
1402 # get all intent keys in the cluster
1403 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001404 try:
1405 # Get the set of all intent keys
Jon Hall5cf14d52015-07-16 12:15:19 -07001406 for nodeStr in ONOSIntents:
1407 node = json.loads( nodeStr )
1408 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001409 keys.append( intent.get( 'id' ) )
1410 keys = set( keys )
1411 # For each intent key, print the state on each node
1412 for key in keys:
1413 row = "%-13s" % key
1414 for nodeStr in ONOSIntents:
1415 node = json.loads( nodeStr )
1416 for intent in node:
1417 if intent.get( 'id', "Error" ) == key:
1418 row += "%-15s" % intent.get( 'state' )
1419 main.log.warn( row )
1420 # End of intent state table
1421 except ValueError as e:
1422 main.log.exception( e )
1423 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001424
1425 if intentsResults and not consistentIntents:
1426 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001427 n = str( main.activeNodes[-1] + 1 )
1428 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001429 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1430 sort_keys=True,
1431 indent=4,
1432 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001433 for i in range( len( ONOSIntents ) ):
1434 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001435 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001436 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001437 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1438 sort_keys=True,
1439 indent=4,
1440 separators=( ',', ': ' ) ) )
1441 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001442 main.log.debug( "ONOS" + node + " intents match ONOS" +
1443 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001444 elif intentsResults and consistentIntents:
1445 intentCheck = main.TRUE
1446 intentState = ONOSIntents[ 0 ]
1447
1448 main.step( "Get the flows from each controller" )
1449 global flowState
1450 flowState = []
1451 ONOSFlows = []
1452 ONOSFlowsJson = []
1453 flowCheck = main.FALSE
1454 consistentFlows = True
1455 flowsResults = True
1456 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001457 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001458 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001459 name="flows-" + str( i ),
1460 args=[],
1461 kwargs={ 'jsonFormat': True } )
1462 threads.append( t )
1463 t.start()
1464
1465 # NOTE: Flows command can take some time to run
1466         time.sleep( 30 )
1467 for t in threads:
1468 t.join()
1469 result = t.result
1470 ONOSFlows.append( result )
1471
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001472 for i in range( len( ONOSFlows ) ):
1473 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001474 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1475 main.log.error( "Error in getting ONOS" + num + " flows" )
1476 main.log.warn( "ONOS" + num + " flows response: " +
1477 repr( ONOSFlows[ i ] ) )
1478 flowsResults = False
1479 ONOSFlowsJson.append( None )
1480 else:
1481 try:
1482 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1483 except ( ValueError, TypeError ):
1484 # FIXME: change this to log.error?
1485 main.log.exception( "Error in parsing ONOS" + num +
1486 " response as json." )
1487 main.log.error( repr( ONOSFlows[ i ] ) )
1488 ONOSFlowsJson.append( None )
1489 flowsResults = False
1490 utilities.assert_equals(
1491 expect=True,
1492 actual=flowsResults,
1493 onpass="No error in reading flows output",
1494 onfail="Error in reading flows from ONOS" )
1495
1496 main.step( "Check for consistency in Flows from each controller" )
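1496         # NOTE: only the number of flows reported by each node is compared
1496         #       here, not the contents of the flow entries themselves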
1497 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1498 if all( tmp ):
1499 main.log.info( "Flow count is consistent across all ONOS nodes" )
1500 else:
1501 consistentFlows = False
1502 utilities.assert_equals(
1503 expect=True,
1504 actual=consistentFlows,
1505 onpass="The flow count is consistent across all ONOS nodes",
1506 onfail="ONOS nodes have different flow counts" )
1507
1508 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001509 for i in range( len( ONOSFlows ) ):
1510 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001511 try:
1512 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001513 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001514 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1515 indent=4, separators=( ',', ': ' ) ) )
1516 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001517 main.log.warn( "ONOS" + node + " flows: " +
1518 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001519 elif flowsResults and consistentFlows:
1520 flowCheck = main.TRUE
1521 flowState = ONOSFlows[ 0 ]
1522
1523 main.step( "Get the OF Table entries" )
1524 global flows
1525 flows = []
1526 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001527 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
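1527         # The tables collected above are saved so that CASE7 can diff them
1527         # against the post-failure tables via main.Mininet1.flowTableComp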
Jon Hall5cf14d52015-07-16 12:15:19 -07001528 if flowCheck == main.FALSE:
1529 for table in flows:
1530 main.log.warn( table )
1531 # TODO: Compare switch flow tables with ONOS flow tables
1532
1533 main.step( "Start continuous pings" )
1534 main.Mininet2.pingLong(
1535 src=main.params[ 'PING' ][ 'source1' ],
1536 target=main.params[ 'PING' ][ 'target1' ],
1537 pingTime=500 )
1538 main.Mininet2.pingLong(
1539 src=main.params[ 'PING' ][ 'source2' ],
1540 target=main.params[ 'PING' ][ 'target2' ],
1541 pingTime=500 )
1542 main.Mininet2.pingLong(
1543 src=main.params[ 'PING' ][ 'source3' ],
1544 target=main.params[ 'PING' ][ 'target3' ],
1545 pingTime=500 )
1546 main.Mininet2.pingLong(
1547 src=main.params[ 'PING' ][ 'source4' ],
1548 target=main.params[ 'PING' ][ 'target4' ],
1549 pingTime=500 )
1550 main.Mininet2.pingLong(
1551 src=main.params[ 'PING' ][ 'source5' ],
1552 target=main.params[ 'PING' ][ 'target5' ],
1553 pingTime=500 )
1554 main.Mininet2.pingLong(
1555 src=main.params[ 'PING' ][ 'source6' ],
1556 target=main.params[ 'PING' ][ 'target6' ],
1557 pingTime=500 )
1558 main.Mininet2.pingLong(
1559 src=main.params[ 'PING' ][ 'source7' ],
1560 target=main.params[ 'PING' ][ 'target7' ],
1561 pingTime=500 )
1562 main.Mininet2.pingLong(
1563 src=main.params[ 'PING' ][ 'source8' ],
1564 target=main.params[ 'PING' ][ 'target8' ],
1565 pingTime=500 )
1566 main.Mininet2.pingLong(
1567 src=main.params[ 'PING' ][ 'source9' ],
1568 target=main.params[ 'PING' ][ 'target9' ],
1569 pingTime=500 )
1570 main.Mininet2.pingLong(
1571 src=main.params[ 'PING' ][ 'source10' ],
1572 target=main.params[ 'PING' ][ 'target10' ],
1573 pingTime=500 )
1574
1575 main.step( "Collecting topology information from ONOS" )
1576 devices = []
1577 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001578 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001579 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001580 name="devices-" + str( i ),
1581 args=[ ] )
1582 threads.append( t )
1583 t.start()
1584
1585 for t in threads:
1586 t.join()
1587 devices.append( t.result )
1588 hosts = []
1589 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001590 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001591 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001592 name="hosts-" + str( i ),
1593 args=[ ] )
1594 threads.append( t )
1595 t.start()
1596
1597 for t in threads:
1598 t.join()
1599 try:
1600 hosts.append( json.loads( t.result ) )
1601 except ( ValueError, TypeError ):
1602 # FIXME: better handling of this, print which node
1603 # Maybe use thread name?
1604 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001605 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001606 hosts.append( None )
1607
1608 ports = []
1609 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001610 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001611 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001612 name="ports-" + str( i ),
1613 args=[ ] )
1614 threads.append( t )
1615 t.start()
1616
1617 for t in threads:
1618 t.join()
1619 ports.append( t.result )
1620 links = []
1621 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001622 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001623 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001624 name="links-" + str( i ),
1625 args=[ ] )
1626 threads.append( t )
1627 t.start()
1628
1629 for t in threads:
1630 t.join()
1631 links.append( t.result )
1632 clusters = []
1633 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001634 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001635 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001636 name="clusters-" + str( i ),
1637 args=[ ] )
1638 threads.append( t )
1639 t.start()
1640
1641 for t in threads:
1642 t.join()
1643 clusters.append( t.result )
1644 # Compare json objects for hosts and dataplane clusters
1645
1646 # hosts
1647 main.step( "Host view is consistent across ONOS nodes" )
1648 consistentHostsResult = main.TRUE
1649 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001650 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001651 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001652 if hosts[ controller ] == hosts[ 0 ]:
1653 continue
1654 else: # hosts not consistent
1655 main.log.error( "hosts from ONOS" +
1656 controllerStr +
1657                                " are inconsistent with ONOS1" )
1658 main.log.warn( repr( hosts[ controller ] ) )
1659 consistentHostsResult = main.FALSE
1660
1661 else:
1662 main.log.error( "Error in getting ONOS hosts from ONOS" +
1663 controllerStr )
1664 consistentHostsResult = main.FALSE
1665 main.log.warn( "ONOS" + controllerStr +
1666 " hosts response: " +
1667 repr( hosts[ controller ] ) )
1668 utilities.assert_equals(
1669 expect=main.TRUE,
1670 actual=consistentHostsResult,
1671 onpass="Hosts view is consistent across all ONOS nodes",
1672 onfail="ONOS nodes have different views of hosts" )
1673
1674 main.step( "Each host has an IP address" )
1675 ipResult = main.TRUE
1676 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001677 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001678 if hosts[ controller ]:
1679 for host in hosts[ controller ]:
1680 if not host.get( 'ipAddresses', [ ] ):
1681 main.log.error( "Error with host ips on controller" +
1682 controllerStr + ": " + str( host ) )
1683 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001684 utilities.assert_equals(
1685 expect=main.TRUE,
1686 actual=ipResult,
1687             onpass="Each host has at least one IP address",
1688             onfail="At least one host is missing an IP address" )
1689
1690 # Strongly connected clusters of devices
1691 main.step( "Cluster view is consistent across ONOS nodes" )
1692 consistentClustersResult = main.TRUE
1693 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001694 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001695 if "Error" not in clusters[ controller ]:
1696 if clusters[ controller ] == clusters[ 0 ]:
1697 continue
1698 else: # clusters not consistent
1699 main.log.error( "clusters from ONOS" + controllerStr +
1700                                " are inconsistent with ONOS1" )
1701 consistentClustersResult = main.FALSE
1702
1703 else:
1704 main.log.error( "Error in getting dataplane clusters " +
1705 "from ONOS" + controllerStr )
1706 consistentClustersResult = main.FALSE
1707 main.log.warn( "ONOS" + controllerStr +
1708 " clusters response: " +
1709 repr( clusters[ controller ] ) )
1710 utilities.assert_equals(
1711 expect=main.TRUE,
1712 actual=consistentClustersResult,
1713 onpass="Clusters view is consistent across all ONOS nodes",
1714 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001715 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001716 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001717
Jon Hall5cf14d52015-07-16 12:15:19 -07001718 # there should always only be one cluster
1719 main.step( "Cluster view correct across ONOS nodes" )
1720 try:
1721 numClusters = len( json.loads( clusters[ 0 ] ) )
1722 except ( ValueError, TypeError ):
1723 main.log.exception( "Error parsing clusters[0]: " +
1724 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001725 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001726 clusterResults = main.FALSE
1727 if numClusters == 1:
1728 clusterResults = main.TRUE
1729 utilities.assert_equals(
1730 expect=1,
1731 actual=numClusters,
1732 onpass="ONOS shows 1 SCC",
1733 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1734
1735 main.step( "Comparing ONOS topology to MN" )
1736 devicesResults = main.TRUE
1737 linksResults = main.TRUE
1738 hostsResults = main.TRUE
1739 mnSwitches = main.Mininet1.getSwitches()
1740 mnLinks = main.Mininet1.getLinks()
1741 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001742         for controller in range( len( main.activeNodes ) ):
1743 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001744 if devices[ controller ] and ports[ controller ] and\
1745 "Error" not in devices[ controller ] and\
1746 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001747 currentDevicesResult = main.Mininet1.compareSwitches(
1748 mnSwitches,
1749 json.loads( devices[ controller ] ),
1750 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001751 else:
1752 currentDevicesResult = main.FALSE
1753 utilities.assert_equals( expect=main.TRUE,
1754 actual=currentDevicesResult,
1755 onpass="ONOS" + controllerStr +
1756 " Switches view is correct",
1757 onfail="ONOS" + controllerStr +
1758 " Switches view is incorrect" )
1759 if links[ controller ] and "Error" not in links[ controller ]:
1760 currentLinksResult = main.Mininet1.compareLinks(
1761 mnSwitches, mnLinks,
1762 json.loads( links[ controller ] ) )
1763 else:
1764 currentLinksResult = main.FALSE
1765 utilities.assert_equals( expect=main.TRUE,
1766 actual=currentLinksResult,
1767 onpass="ONOS" + controllerStr +
1768 " links view is correct",
1769 onfail="ONOS" + controllerStr +
1770 " links view is incorrect" )
1771
Jon Hall657cdf62015-12-17 14:40:51 -08001772 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001773 currentHostsResult = main.Mininet1.compareHosts(
1774 mnHosts,
1775 hosts[ controller ] )
1776 else:
1777 currentHostsResult = main.FALSE
1778 utilities.assert_equals( expect=main.TRUE,
1779 actual=currentHostsResult,
1780 onpass="ONOS" + controllerStr +
1781 " hosts exist in Mininet",
1782 onfail="ONOS" + controllerStr +
1783 " hosts don't match Mininet" )
1784
1785 devicesResults = devicesResults and currentDevicesResult
1786 linksResults = linksResults and currentLinksResult
1787 hostsResults = hostsResults and currentHostsResult
1788
1789 main.step( "Device information is correct" )
1790 utilities.assert_equals(
1791 expect=main.TRUE,
1792 actual=devicesResults,
1793 onpass="Device information is correct",
1794 onfail="Device information is incorrect" )
1795
1796 main.step( "Links are correct" )
1797 utilities.assert_equals(
1798 expect=main.TRUE,
1799 actual=linksResults,
1800                             onpass="Links are correct",
1801 onfail="Links are incorrect" )
1802
1803 main.step( "Hosts are correct" )
1804 utilities.assert_equals(
1805 expect=main.TRUE,
1806 actual=hostsResults,
1807 onpass="Hosts are correct",
1808 onfail="Hosts are incorrect" )
1809
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001810 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001811 """
1812         The Failure case: kill a minority of the ONOS nodes.
1813 """
Jon Halle1a3b752015-07-22 13:02:46 -07001814 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001815 assert main, "main not defined"
1816 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001817 assert main.CLIs, "main.CLIs not defined"
1818 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001819 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001820
1821 main.step( "Checking ONOS Logs for errors" )
1822 for node in main.nodes:
1823 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1824 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1825
Jon Hall3b489db2015-10-05 14:38:37 -07001826 n = len( main.nodes ) # Number of nodes
1827 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1828 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1829 if n > 3:
1830 main.kill.append( p - 1 )
1831         # NOTE: This only works for cluster sizes of 3, 5, or 7.
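1831         # Worked example of the arithmetic above, assuming Python 2 integer
1831         # division as used elsewhere in this file:
1831         #   n = 3  ->  p = 3, main.kill = [ 0 ]
1831         #   n = 5  ->  p = 4, main.kill = [ 0, 3 ]
1831         #   n = 7  ->  p = 5, main.kill = [ 0, 4 ]
1831         # i.e. a minority of the cluster ( 1 of 3, 2 of 5, 2 of 7 ) is killed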
1832
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001833 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001834 killResults = main.TRUE
1835 for i in main.kill:
1836 killResults = killResults and\
1837 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001838 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001839 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001840 onpass="ONOS nodes killed successfully",
1841 onfail="ONOS nodes NOT successfully killed" )
1842
Jon Halld2871c22016-07-26 11:01:14 -07001843 main.step( "Checking ONOS nodes" )
1844 nodeResults = utilities.retry( main.HA.nodesCheck,
1845 False,
1846 args=[main.activeNodes],
1847 sleep=15,
1848 attempts=5 )
1849
1850 utilities.assert_equals( expect=True, actual=nodeResults,
1851 onpass="Nodes check successful",
1852 onfail="Nodes check NOT successful" )
1853
1854 if not nodeResults:
1855 for i in main.activeNodes:
1856 cli = main.CLIs[i]
1857 main.log.debug( "{} components not ACTIVE: \n{}".format(
1858 cli.name,
1859 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1860             main.log.error( "ONOS nodes are not healthy after the kill, stopping test" )
1861 main.cleanup()
1862 main.exit()
1863
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001864 def CASE62( self, main ):
1865 """
1866         Bring up the stopped nodes
1867 """
1868 import time
1869 assert main.numCtrls, "main.numCtrls not defined"
1870 assert main, "main not defined"
1871 assert utilities.assert_equals, "utilities.assert_equals not defined"
1872 assert main.CLIs, "main.CLIs not defined"
1873 assert main.nodes, "main.nodes not defined"
1874 assert main.kill, "main.kill not defined"
1875 main.case( "Restart minority of ONOS nodes" )
1876
1877 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1878 startResults = main.TRUE
1879 restartTime = time.time()
1880 for i in main.kill:
1881 startResults = startResults and\
1882 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1883 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1884 onpass="ONOS nodes started successfully",
1885 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001886
1887 main.step( "Checking if ONOS is up yet" )
1888 count = 0
1889 onosIsupResult = main.FALSE
1890 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001891 onosIsupResult = main.TRUE
1892 for i in main.kill:
1893 onosIsupResult = onosIsupResult and\
1894 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001895 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001896 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1897 onpass="ONOS restarted successfully",
1898 onfail="ONOS restart NOT successful" )
1899
Jon Halle1a3b752015-07-22 13:02:46 -07001900         main.step( "Restarting ONOS CLI sessions" )
Jon Hall3b489db2015-10-05 14:38:37 -07001901 cliResults = main.TRUE
1902 for i in main.kill:
1903 cliResults = cliResults and\
1904 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001905 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001906 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1907 onpass="ONOS cli restarted",
1908 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001909 main.activeNodes.sort()
1910 try:
1911 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1912 "List of active nodes has duplicates, this likely indicates something was run out of order"
1913 except AssertionError:
1914 main.log.exception( "" )
1915 main.cleanup()
1916 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001917
1918         # Grab the time of restart so we can check how long the gossip
1919 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001920 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001921 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001922         # TODO: Make this configurable. Also, we are breaking the above timer
Jon Halld2871c22016-07-26 11:01:14 -07001923 main.step( "Checking ONOS nodes" )
1924 nodeResults = utilities.retry( main.HA.nodesCheck,
1925 False,
1926 args=[main.activeNodes],
1927 sleep=15,
1928 attempts=5 )
1929
1930 utilities.assert_equals( expect=True, actual=nodeResults,
1931 onpass="Nodes check successful",
1932 onfail="Nodes check NOT successful" )
1933
1934 if not nodeResults:
1935 for i in main.activeNodes:
1936 cli = main.CLIs[i]
1937 main.log.debug( "{} components not ACTIVE: \n{}".format(
1938 cli.name,
1939 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1940 main.log.error( "Failed to start ONOS, stopping test" )
1941 main.cleanup()
1942 main.exit()
1943
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001944 node = main.activeNodes[0]
1945 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1946 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1947 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001948
Jon Halla440e872016-03-31 15:15:50 -07001949 main.step( "Rerun for election on the node(s) that were killed" )
1950 runResults = main.TRUE
1951 for i in main.kill:
1952 runResults = runResults and\
1953 main.CLIs[i].electionTestRun()
1954 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1955 onpass="ONOS nodes reran for election topic",
1956                                  onfail="Error rerunning for election" )
1957
Jon Hall5cf14d52015-07-16 12:15:19 -07001958 def CASE7( self, main ):
1959 """
1960 Check state after ONOS failure
1961 """
1962 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001963 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001964 assert main, "main not defined"
1965 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001966 assert main.CLIs, "main.CLIs not defined"
1967 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001968 try:
1969 main.kill
1970 except AttributeError:
1971 main.kill = []
1972
Jon Hall5cf14d52015-07-16 12:15:19 -07001973 main.case( "Running ONOS Constant State Tests" )
1974
1975 main.step( "Check that each switch has a master" )
1976 # Assert that each device has a master
1977 rolesNotNull = main.TRUE
1978 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001979 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001980 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001981 name="rolesNotNull-" + str( i ),
1982 args=[ ] )
1983 threads.append( t )
1984 t.start()
1985
1986 for t in threads:
1987 t.join()
1988 rolesNotNull = rolesNotNull and t.result
1989 utilities.assert_equals(
1990 expect=main.TRUE,
1991 actual=rolesNotNull,
1992 onpass="Each device has a master",
1993 onfail="Some devices don't have a master assigned" )
1994
1995 main.step( "Read device roles from ONOS" )
1996 ONOSMastership = []
Jon Halla440e872016-03-31 15:15:50 -07001997 mastershipCheck = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001998 consistentMastership = True
1999 rolesResults = True
2000 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002001 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002002 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07002003 name="roles-" + str( i ),
2004 args=[] )
2005 threads.append( t )
2006 t.start()
2007
2008 for t in threads:
2009 t.join()
2010 ONOSMastership.append( t.result )
2011
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002012 for i in range( len( ONOSMastership ) ):
2013 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002014 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002015 main.log.error( "Error in getting ONOS" + node + " roles" )
2016 main.log.warn( "ONOS" + node + " mastership response: " +
2017 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002018 rolesResults = False
2019 utilities.assert_equals(
2020 expect=True,
2021 actual=rolesResults,
2022 onpass="No error in reading roles output",
2023 onfail="Error in reading roles from ONOS" )
2024
2025 main.step( "Check for consistency in roles from each controller" )
2026 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
2027 main.log.info(
2028 "Switch roles are consistent across all ONOS nodes" )
2029 else:
2030 consistentMastership = False
2031 utilities.assert_equals(
2032 expect=True,
2033 actual=consistentMastership,
2034 onpass="Switch roles are consistent across all ONOS nodes",
2035 onfail="ONOS nodes have different views of switch roles" )
2036
2037 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002038 for i in range( len( ONOSMastership ) ):
2039 node = str( main.activeNodes[i] + 1 )
2040                 main.log.warn( "ONOS" + node + " roles: " +
2041 json.dumps( json.loads( ONOSMastership[ i ] ),
2042 sort_keys=True,
2043 indent=4,
2044 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07002045 elif rolesResults and consistentMastership:
2046 mastershipCheck = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002047
2048 # NOTE: we expect mastership to change on controller failure
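2048         # so the mastershipState saved in CASE5 is intentionally not
2048         # re-checked against the current roles here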
Jon Hall5cf14d52015-07-16 12:15:19 -07002049
2050 main.step( "Get the intents and compare across all nodes" )
2051 ONOSIntents = []
2052 intentCheck = main.FALSE
2053 consistentIntents = True
2054 intentsResults = True
2055 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002056 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002057 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07002058 name="intents-" + str( i ),
2059 args=[],
2060 kwargs={ 'jsonFormat': True } )
2061 threads.append( t )
2062 t.start()
2063
2064 for t in threads:
2065 t.join()
2066 ONOSIntents.append( t.result )
2067
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002068         for i in range( len( ONOSIntents ) ):
2069 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002070 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002071 main.log.error( "Error in getting ONOS" + node + " intents" )
2072 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07002073 repr( ONOSIntents[ i ] ) )
2074 intentsResults = False
2075 utilities.assert_equals(
2076 expect=True,
2077 actual=intentsResults,
2078 onpass="No error in reading intents output",
2079 onfail="Error in reading intents from ONOS" )
2080
2081 main.step( "Check for consistency in Intents from each controller" )
2082 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2083 main.log.info( "Intents are consistent across all ONOS " +
2084 "nodes" )
2085 else:
2086 consistentIntents = False
2087
2088 # Try to make it easy to figure out what is happening
2089 #
2090 # Intent ONOS1 ONOS2 ...
2091 # 0x01 INSTALLED INSTALLING
2092 # ... ... ...
2093 # ... ... ...
2094 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002095 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07002096 title += " " * 10 + "ONOS" + str( n + 1 )
2097 main.log.warn( title )
2098 # get all intent keys in the cluster
2099 keys = []
2100 for nodeStr in ONOSIntents:
2101 node = json.loads( nodeStr )
2102 for intent in node:
2103 keys.append( intent.get( 'id' ) )
2104 keys = set( keys )
2105 for key in keys:
2106 row = "%-13s" % key
2107 for nodeStr in ONOSIntents:
2108 node = json.loads( nodeStr )
2109 for intent in node:
2110 if intent.get( 'id' ) == key:
2111 row += "%-15s" % intent.get( 'state' )
2112 main.log.warn( row )
2113 # End table view
2114
2115 utilities.assert_equals(
2116 expect=True,
2117 actual=consistentIntents,
2118 onpass="Intents are consistent across all ONOS nodes",
2119 onfail="ONOS nodes have different views of intents" )
2120 intentStates = []
2121 for node in ONOSIntents: # Iter through ONOS nodes
2122 nodeStates = []
2123 # Iter through intents of a node
2124 try:
2125 for intent in json.loads( node ):
2126 nodeStates.append( intent[ 'state' ] )
2127 except ( ValueError, TypeError ):
2128 main.log.exception( "Error in parsing intents" )
2129 main.log.error( repr( node ) )
2130 intentStates.append( nodeStates )
2131 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2132 main.log.info( dict( out ) )
2133
2134 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002135 for i in range( len( main.activeNodes ) ):
2136 node = str( main.activeNodes[i] + 1 )
2137 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07002138 main.log.warn( json.dumps(
2139 json.loads( ONOSIntents[ i ] ),
2140 sort_keys=True,
2141 indent=4,
2142 separators=( ',', ': ' ) ) )
2143 elif intentsResults and consistentIntents:
2144 intentCheck = main.TRUE
2145
2146 # NOTE: Store has no durability, so intents are lost across system
2147 # restarts
2148 main.step( "Compare current intents with intents before the failure" )
2149 # NOTE: this requires case 5 to pass for intentState to be set.
2150 # maybe we should stop the test if that fails?
2151 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002152 try:
2153 intentState
2154 except NameError:
2155 main.log.warn( "No previous intent state was saved" )
2156 else:
2157 if intentState and intentState == ONOSIntents[ 0 ]:
2158 sameIntents = main.TRUE
2159 main.log.info( "Intents are consistent with before failure" )
2160 # TODO: possibly the states have changed? we may need to figure out
2161 # what the acceptable states are
2162 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2163 sameIntents = main.TRUE
2164 try:
2165 before = json.loads( intentState )
2166 after = json.loads( ONOSIntents[ 0 ] )
2167 for intent in before:
2168 if intent not in after:
2169 sameIntents = main.FALSE
2170 main.log.debug( "Intent is not currently in ONOS " +
2171 "(at least in the same form):" )
2172 main.log.debug( json.dumps( intent ) )
2173 except ( ValueError, TypeError ):
2174 main.log.exception( "Exception printing intents" )
2175 main.log.debug( repr( ONOSIntents[0] ) )
2176 main.log.debug( repr( intentState ) )
2177 if sameIntents == main.FALSE:
2178 try:
2179 main.log.debug( "ONOS intents before: " )
2180 main.log.debug( json.dumps( json.loads( intentState ),
2181 sort_keys=True, indent=4,
2182 separators=( ',', ': ' ) ) )
2183 main.log.debug( "Current ONOS intents: " )
2184 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2185 sort_keys=True, indent=4,
2186 separators=( ',', ': ' ) ) )
2187 except ( ValueError, TypeError ):
2188 main.log.exception( "Exception printing intents" )
2189 main.log.debug( repr( ONOSIntents[0] ) )
2190 main.log.debug( repr( intentState ) )
2191 utilities.assert_equals(
2192 expect=main.TRUE,
2193 actual=sameIntents,
2194 onpass="Intents are consistent with before failure",
2195 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002196 intentCheck = intentCheck and sameIntents
2197
2198 main.step( "Get the OF Table entries and compare to before " +
2199 "component failure" )
2200 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002201 for i in range( 28 ):
2202 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002203 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002204 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2205 FlowTables = FlowTables and curSwitch
2206 if curSwitch == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002207 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002208 utilities.assert_equals(
2209 expect=main.TRUE,
2210 actual=FlowTables,
2211 onpass="No changes were found in the flow tables",
2212 onfail="Changes were found in the flow tables" )
2213
2214 main.Mininet2.pingLongKill()
2215 '''
2216 main.step( "Check the continuous pings to ensure that no packets " +
2217 "were dropped during component failure" )
2218 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2219 main.params[ 'TESTONIP' ] )
2220 LossInPings = main.FALSE
2221 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2222 for i in range( 8, 18 ):
2223 main.log.info(
2224 "Checking for a loss in pings along flow from s" +
2225 str( i ) )
2226 LossInPings = main.Mininet2.checkForLoss(
2227 "/tmp/ping.h" +
2228 str( i ) ) or LossInPings
2229 if LossInPings == main.TRUE:
2230 main.log.info( "Loss in ping detected" )
2231 elif LossInPings == main.ERROR:
2232 main.log.info( "There are multiple mininet process running" )
2233 elif LossInPings == main.FALSE:
2234 main.log.info( "No Loss in the pings" )
2235 main.log.info( "No loss of dataplane connectivity" )
2236 utilities.assert_equals(
2237 expect=main.FALSE,
2238 actual=LossInPings,
2239 onpass="No Loss of connectivity",
2240 onfail="Loss of dataplane connectivity detected" )
2241 '''
2242
2243 main.step( "Leadership Election is still functional" )
2244 # Test of LeadershipElection
2245 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002246
Jon Hall3b489db2015-10-05 14:38:37 -07002247 restarted = []
2248 for i in main.kill:
2249 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002250 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002251
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002252 for i in main.activeNodes:
2253 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002254 leaderN = cli.electionTestLeader()
2255 leaderList.append( leaderN )
2256 if leaderN == main.FALSE:
2257 # error in response
2258 main.log.error( "Something is wrong with " +
2259 "electionTestLeader function, check the" +
2260 " error logs" )
2261 leaderResult = main.FALSE
2262 elif leaderN is None:
2263 main.log.error( cli.name +
2264                                  " shows no leader was elected for the" +
2265                                  " election-app after the old one died" )
2266 leaderResult = main.FALSE
2267 elif leaderN in restarted:
2268 main.log.error( cli.name + " shows " + str( leaderN ) +
2269 " as leader for the election-app, but it " +
2270 "was restarted" )
2271 leaderResult = main.FALSE
2272 if len( set( leaderList ) ) != 1:
2273 leaderResult = main.FALSE
2274 main.log.error(
2275 "Inconsistent view of leader for the election test app" )
2276 # TODO: print the list
2277 utilities.assert_equals(
2278 expect=main.TRUE,
2279 actual=leaderResult,
2280 onpass="Leadership election passed",
2281 onfail="Something went wrong with Leadership election" )
2282
2283 def CASE8( self, main ):
2284 """
2285 Compare topo
2286 """
2287 import json
2288 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002289 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002290 assert main, "main not defined"
2291 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002292 assert main.CLIs, "main.CLIs not defined"
2293 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002294
2295 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002296 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002297 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002298 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002299         topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002300 elapsed = 0
2301 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002302 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002303 startTime = time.time()
2304 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002305 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002306 devicesResults = main.TRUE
2307 linksResults = main.TRUE
2308 hostsResults = main.TRUE
2309 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002310 count += 1
2311 cliStart = time.time()
2312 devices = []
2313 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002314 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002315 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002316 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002317 args=[ main.CLIs[i].devices, [ None ] ],
2318 kwargs= { 'sleep': 5, 'attempts': 5,
2319 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002320 threads.append( t )
2321 t.start()
2322
2323 for t in threads:
2324 t.join()
2325 devices.append( t.result )
2326 hosts = []
2327 ipResult = main.TRUE
2328 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002329 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002330 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002331 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002332 args=[ main.CLIs[i].hosts, [ None ] ],
2333 kwargs= { 'sleep': 5, 'attempts': 5,
2334 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002335 threads.append( t )
2336 t.start()
2337
2338 for t in threads:
2339 t.join()
2340 try:
2341 hosts.append( json.loads( t.result ) )
2342 except ( ValueError, TypeError ):
2343 main.log.exception( "Error parsing hosts results" )
2344 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002345 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002346 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002347 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002348 if hosts[ controller ]:
2349 for host in hosts[ controller ]:
2350 if host is None or host.get( 'ipAddresses', [] ) == []:
2351 main.log.error(
2352 "Error with host ipAddresses on controller" +
2353 controllerStr + ": " + str( host ) )
2354 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002355 ports = []
2356 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002357 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002358 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002359 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002360 args=[ main.CLIs[i].ports, [ None ] ],
2361 kwargs= { 'sleep': 5, 'attempts': 5,
2362 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002363 threads.append( t )
2364 t.start()
2365
2366 for t in threads:
2367 t.join()
2368 ports.append( t.result )
2369 links = []
2370 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002371 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002372 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002373 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002374 args=[ main.CLIs[i].links, [ None ] ],
2375 kwargs= { 'sleep': 5, 'attempts': 5,
2376 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002377 threads.append( t )
2378 t.start()
2379
2380 for t in threads:
2381 t.join()
2382 links.append( t.result )
2383 clusters = []
2384 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002385 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002386 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002387 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002388 args=[ main.CLIs[i].clusters, [ None ] ],
2389 kwargs= { 'sleep': 5, 'attempts': 5,
2390 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002391 threads.append( t )
2392 t.start()
2393
2394 for t in threads:
2395 t.join()
2396 clusters.append( t.result )
2397
2398 elapsed = time.time() - startTime
2399 cliTime = time.time() - cliStart
2400 print "Elapsed time: " + str( elapsed )
2401 print "CLI time: " + str( cliTime )
2402
Jon Hall6e709752016-02-01 13:38:46 -08002403 if all( e is None for e in devices ) and\
2404 all( e is None for e in hosts ) and\
2405 all( e is None for e in ports ) and\
2406 all( e is None for e in links ) and\
2407 all( e is None for e in clusters ):
2408 topoFailMsg = "Could not get topology from ONOS"
2409 main.log.error( topoFailMsg )
2410 continue # Try again, No use trying to compare
2411
Jon Hall5cf14d52015-07-16 12:15:19 -07002412 mnSwitches = main.Mininet1.getSwitches()
2413 mnLinks = main.Mininet1.getLinks()
2414 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002415 for controller in range( len( main.activeNodes ) ):
2416 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002417 if devices[ controller ] and ports[ controller ] and\
2418 "Error" not in devices[ controller ] and\
2419 "Error" not in ports[ controller ]:
2420
Jon Hallc6793552016-01-19 14:18:37 -08002421 try:
2422 currentDevicesResult = main.Mininet1.compareSwitches(
2423 mnSwitches,
2424 json.loads( devices[ controller ] ),
2425 json.loads( ports[ controller ] ) )
2426 except ( TypeError, ValueError ) as e:
2427 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2428 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002429 else:
2430 currentDevicesResult = main.FALSE
2431 utilities.assert_equals( expect=main.TRUE,
2432 actual=currentDevicesResult,
2433 onpass="ONOS" + controllerStr +
2434 " Switches view is correct",
2435 onfail="ONOS" + controllerStr +
2436 " Switches view is incorrect" )
2437
2438 if links[ controller ] and "Error" not in links[ controller ]:
2439 currentLinksResult = main.Mininet1.compareLinks(
2440 mnSwitches, mnLinks,
2441 json.loads( links[ controller ] ) )
2442 else:
2443 currentLinksResult = main.FALSE
2444 utilities.assert_equals( expect=main.TRUE,
2445 actual=currentLinksResult,
2446 onpass="ONOS" + controllerStr +
2447 " links view is correct",
2448 onfail="ONOS" + controllerStr +
2449 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002450 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002451 currentHostsResult = main.Mininet1.compareHosts(
2452 mnHosts,
2453 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002454 elif hosts[ controller ] == []:
2455 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002456 else:
2457 currentHostsResult = main.FALSE
2458 utilities.assert_equals( expect=main.TRUE,
2459 actual=currentHostsResult,
2460 onpass="ONOS" + controllerStr +
2461 " hosts exist in Mininet",
2462 onfail="ONOS" + controllerStr +
2463 " hosts don't match Mininet" )
2464 # CHECKING HOST ATTACHMENT POINTS
2465 hostAttachment = True
2466 zeroHosts = False
2467 # FIXME: topo-HA/obelisk specific mappings:
2468 # key is mac and value is dpid
2469 mappings = {}
2470 for i in range( 1, 29 ): # hosts 1 through 28
2471 # set up correct variables:
2472 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2473 if i == 1:
2474 deviceId = "1000".zfill(16)
2475 elif i == 2:
2476 deviceId = "2000".zfill(16)
2477 elif i == 3:
2478 deviceId = "3000".zfill(16)
2479 elif i == 4:
2480 deviceId = "3004".zfill(16)
2481 elif i == 5:
2482 deviceId = "5000".zfill(16)
2483 elif i == 6:
2484 deviceId = "6000".zfill(16)
2485 elif i == 7:
2486 deviceId = "6007".zfill(16)
2487 elif i >= 8 and i <= 17:
2488 dpid = '3' + str( i ).zfill( 3 )
2489 deviceId = dpid.zfill(16)
2490 elif i >= 18 and i <= 27:
2491 dpid = '6' + str( i ).zfill( 3 )
2492 deviceId = dpid.zfill(16)
2493 elif i == 28:
2494 deviceId = "2800".zfill(16)
2495 mappings[ macId ] = deviceId
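2495                 # For example, the rules above yield ( values derived by hand ):
2495                 #   host 3  -> mac "00:00:00:00:00:03" -> deviceId "0000000000003000"
2495                 #   host 10 -> mac "00:00:00:00:00:0A" -> deviceId "0000000000003010"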
Jon Halld8f6de82015-12-17 17:04:34 -08002496 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002497 if hosts[ controller ] == []:
2498 main.log.warn( "There are no hosts discovered" )
2499 zeroHosts = True
2500 else:
2501 for host in hosts[ controller ]:
2502 mac = None
2503 location = None
2504 device = None
2505 port = None
2506 try:
2507 mac = host.get( 'mac' )
2508 assert mac, "mac field could not be found for this host object"
2509
2510 location = host.get( 'location' )
2511 assert location, "location field could not be found for this host object"
2512
2513 # Trim the protocol identifier off deviceId
2514 device = str( location.get( 'elementId' ) ).split(':')[1]
2515 assert device, "elementId field could not be found for this host location object"
2516
2517 port = location.get( 'port' )
2518 assert port, "port field could not be found for this host location object"
2519
2520 # Now check if this matches where they should be
2521 if mac and device and port:
2522 if str( port ) != "1":
2523 main.log.error( "The attachment port is incorrect for " +
2524 "host " + str( mac ) +
2525 ". Expected: 1 Actual: " + str( port) )
2526 hostAttachment = False
2527 if device != mappings[ str( mac ) ]:
2528 main.log.error( "The attachment device is incorrect for " +
2529 "host " + str( mac ) +
2530 ". Expected: " + mappings[ str( mac ) ] +
2531 " Actual: " + device )
2532 hostAttachment = False
2533 else:
2534 hostAttachment = False
2535 except AssertionError:
2536 main.log.exception( "Json object not as expected" )
2537 main.log.error( repr( host ) )
2538 hostAttachment = False
2539 else:
2540 main.log.error( "No hosts json output or \"Error\"" +
2541 " in output. hosts = " +
2542 repr( hosts[ controller ] ) )
2543 if zeroHosts is False:
2544 hostAttachment = True
2545
2546 # END CHECKING HOST ATTACHMENT POINTS
2547 devicesResults = devicesResults and currentDevicesResult
2548 linksResults = linksResults and currentLinksResult
2549 hostsResults = hostsResults and currentHostsResult
2550 hostAttachmentResults = hostAttachmentResults and\
2551 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002552 topoResult = ( devicesResults and linksResults
2553 and hostsResults and ipResult and
2554 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002555 utilities.assert_equals( expect=True,
2556 actual=topoResult,
2557 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002558 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002559 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002560
2561 # Compare json objects for hosts and dataplane clusters
2562
2563 # hosts
2564 main.step( "Hosts view is consistent across all ONOS nodes" )
2565 consistentHostsResult = main.TRUE
2566 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002567 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002568 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002569 if hosts[ controller ] == hosts[ 0 ]:
2570 continue
2571 else: # hosts not consistent
2572 main.log.error( "hosts from ONOS" + controllerStr +
2573                                    " are inconsistent with ONOS1" )
2574 main.log.warn( repr( hosts[ controller ] ) )
2575 consistentHostsResult = main.FALSE
2576
2577 else:
2578 main.log.error( "Error in getting ONOS hosts from ONOS" +
2579 controllerStr )
2580 consistentHostsResult = main.FALSE
2581 main.log.warn( "ONOS" + controllerStr +
2582 " hosts response: " +
2583 repr( hosts[ controller ] ) )
2584 utilities.assert_equals(
2585 expect=main.TRUE,
2586 actual=consistentHostsResult,
2587 onpass="Hosts view is consistent across all ONOS nodes",
2588 onfail="ONOS nodes have different views of hosts" )
2589
2590 main.step( "Hosts information is correct" )
2591 hostsResults = hostsResults and ipResult
2592 utilities.assert_equals(
2593 expect=main.TRUE,
2594 actual=hostsResults,
2595 onpass="Host information is correct",
2596 onfail="Host information is incorrect" )
2597
2598 main.step( "Host attachment points to the network" )
2599 utilities.assert_equals(
2600 expect=True,
2601 actual=hostAttachmentResults,
2602 onpass="Hosts are correctly attached to the network",
2603 onfail="ONOS did not correctly attach hosts to the network" )
2604
2605 # Strongly connected clusters of devices
2606 main.step( "Clusters view is consistent across all ONOS nodes" )
2607 consistentClustersResult = main.TRUE
2608 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002609 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002610 if "Error" not in clusters[ controller ]:
2611 if clusters[ controller ] == clusters[ 0 ]:
2612 continue
2613 else: # clusters not consistent
2614 main.log.error( "clusters from ONOS" +
2615 controllerStr +
2616                                    " are inconsistent with ONOS1" )
2617 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002618 else:
2619 main.log.error( "Error in getting dataplane clusters " +
2620 "from ONOS" + controllerStr )
2621 consistentClustersResult = main.FALSE
2622 main.log.warn( "ONOS" + controllerStr +
2623 " clusters response: " +
2624 repr( clusters[ controller ] ) )
2625 utilities.assert_equals(
2626 expect=main.TRUE,
2627 actual=consistentClustersResult,
2628 onpass="Clusters view is consistent across all ONOS nodes",
2629 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002630 if not consistentClustersResult:
2631 main.log.debug( clusters )
Jon Hall5cf14d52015-07-16 12:15:19 -07002632
2633 main.step( "There is only one SCC" )
2634 # there should always only be one cluster
2635 try:
2636 numClusters = len( json.loads( clusters[ 0 ] ) )
2637 except ( ValueError, TypeError ):
2638 main.log.exception( "Error parsing clusters[0]: " +
2639 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002640 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002641 clusterResults = main.FALSE
2642 if numClusters == 1:
2643 clusterResults = main.TRUE
2644 utilities.assert_equals(
2645 expect=1,
2646 actual=numClusters,
2647 onpass="ONOS shows 1 SCC",
2648 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2649
2650 topoResult = ( devicesResults and linksResults
2651 and hostsResults and consistentHostsResult
2652 and consistentClustersResult and clusterResults
2653 and ipResult and hostAttachmentResults )
2654
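2654         # Only accept the topology if it converged within two polling attempts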
2655 topoResult = topoResult and int( count <= 2 )
2656 note = "note it takes about " + str( int( cliTime ) ) + \
2657 " seconds for the test to make all the cli calls to fetch " +\
2658 "the topology from each ONOS instance"
2659 main.log.info(
2660 "Very crass estimate for topology discovery/convergence( " +
2661 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2662 str( count ) + " tries" )
2663
2664 main.step( "Device information is correct" )
2665 utilities.assert_equals(
2666 expect=main.TRUE,
2667 actual=devicesResults,
2668 onpass="Device information is correct",
2669 onfail="Device information is incorrect" )
2670
2671 main.step( "Links are correct" )
2672 utilities.assert_equals(
2673 expect=main.TRUE,
2674 actual=linksResults,
2675             onpass="Links are correct",
2676 onfail="Links are incorrect" )
2677
Jon Halla440e872016-03-31 15:15:50 -07002678 main.step( "Hosts are correct" )
2679 utilities.assert_equals(
2680 expect=main.TRUE,
2681 actual=hostsResults,
2682 onpass="Hosts are correct",
2683 onfail="Hosts are incorrect" )
2684
Jon Hall5cf14d52015-07-16 12:15:19 -07002685 # FIXME: move this to an ONOS state case
2686 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002687 nodeResults = utilities.retry( main.HA.nodesCheck,
2688 False,
2689 args=[main.activeNodes],
2690 attempts=5 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002691
Jon Hall41d39f12016-04-11 22:54:35 -07002692 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall5cf14d52015-07-16 12:15:19 -07002693 onpass="Nodes check successful",
2694 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002695 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002696 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002697 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002698 main.CLIs[i].name,
2699 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002700
Jon Halld2871c22016-07-26 11:01:14 -07002701 if not topoResult:
2702 main.cleanup()
2703 main.exit()
2704
Jon Hall5cf14d52015-07-16 12:15:19 -07002705 def CASE9( self, main ):
2706 """
2707 Link s3-s28 down
2708 """
2709 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002710 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002711 assert main, "main not defined"
2712 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002713 assert main.CLIs, "main.CLIs not defined"
2714 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002715 # NOTE: You should probably run a topology check after this
2716
2717 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2718
2719 description = "Turn off a link to ensure that Link Discovery " +\
2720 "is working properly"
2721 main.case( description )
2722
2723 main.step( "Kill Link between s3 and s28" )
2724 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2725 main.log.info( "Waiting " + str( linkSleep ) +
2726 " seconds for link down to be discovered" )
2727 time.sleep( linkSleep )
2728 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2729 onpass="Link down successful",
2730 onfail="Failed to bring link down" )
2731 # TODO do some sort of check here
2732
2733 def CASE10( self, main ):
2734 """
2735 Link s3-s28 up
2736 """
2737 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002738 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002739 assert main, "main not defined"
2740 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002741 assert main.CLIs, "main.CLIs not defined"
2742 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002743 # NOTE: You should probably run a topology check after this
2744
2745 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2746
2747 description = "Restore a link to ensure that Link Discovery is " + \
2748 "working properly"
2749 main.case( description )
2750
2751 main.step( "Bring link between s3 and s28 back up" )
2752 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2753 main.log.info( "Waiting " + str( linkSleep ) +
2754 " seconds for link up to be discovered" )
2755 time.sleep( linkSleep )
2756 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2757 onpass="Link up successful",
2758 onfail="Failed to bring link up" )
2759 # TODO do some sort of check here
2760
2761 def CASE11( self, main ):
2762 """
2763 Switch Down
2764 """
2765 # NOTE: You should probably run a topology check after this
2766 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002767 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002768 assert main, "main not defined"
2769 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002770 assert main.CLIs, "main.CLIs not defined"
2771 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002772
2773 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2774
2775 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002776 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002777 main.case( description )
2778 switch = main.params[ 'kill' ][ 'switch' ]
2779 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2780
2781 # TODO: Make this switch parameterizable
2782 main.step( "Kill " + switch )
2783 main.log.info( "Deleting " + switch )
2784 main.Mininet1.delSwitch( switch )
2785 main.log.info( "Waiting " + str( switchSleep ) +
2786 " seconds for switch down to be discovered" )
2787 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002788 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002789 # Peek at the deleted switch
2790 main.log.warn( str( device ) )
2791 result = main.FALSE
2792 if device and device[ 'available' ] is False:
2793 result = main.TRUE
2794 utilities.assert_equals( expect=main.TRUE, actual=result,
2795 onpass="Kill switch successful",
2796 onfail="Failed to kill switch?" )
2797
2798 def CASE12( self, main ):
2799 """
2800 Switch Up
2801 """
2802 # NOTE: You should probably run a topology check after this
2803 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002804 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002805 assert main, "main not defined"
2806 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002807 assert main.CLIs, "main.CLIs not defined"
2808 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002809 assert ONOS1Port, "ONOS1Port not defined"
2810 assert ONOS2Port, "ONOS2Port not defined"
2811 assert ONOS3Port, "ONOS3Port not defined"
2812 assert ONOS4Port, "ONOS4Port not defined"
2813 assert ONOS5Port, "ONOS5Port not defined"
2814 assert ONOS6Port, "ONOS6Port not defined"
2815 assert ONOS7Port, "ONOS7Port not defined"
2816
2817 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2818 switch = main.params[ 'kill' ][ 'switch' ]
2819 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2820 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002821 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002822 description = "Adding a switch to ensure it is discovered correctly"
2823 main.case( description )
2824
2825 main.step( "Add back " + switch )
2826 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2827 for peer in links:
2828 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002829 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002830 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2831 main.log.info( "Waiting " + str( switchSleep ) +
2832 " seconds for switch up to be discovered" )
2833 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002834 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002835 # Peek at the deleted switch
2836 main.log.warn( str( device ) )
2837 result = main.FALSE
2838 if device and device[ 'available' ]:
2839 result = main.TRUE
2840 utilities.assert_equals( expect=main.TRUE, actual=result,
2841 onpass="add switch successful",
2842 onfail="Failed to add switch?" )
2843
2844 def CASE13( self, main ):
2845 """
2846 Clean up
2847 """
2848 import os
2849 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002850 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002851 assert main, "main not defined"
2852 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002853 assert main.CLIs, "main.CLIs not defined"
2854 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002855
2856 # printing colors to terminal
2857 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2858 'blue': '\033[94m', 'green': '\033[92m',
2859 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2860 main.case( "Test Cleanup" )
2861 main.step( "Killing tcpdumps" )
2862 main.Mininet2.stopTcpdump()
2863
2864 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002865 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002866 main.step( "Copying MN pcap and ONOS log files to test station" )
2867 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2868 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002869 # NOTE: MN Pcap file is being saved to logdir.
2870 # We scp this file as MN and TestON aren't necessarily the same vm
2871
2872 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002873 # TODO: Load these from params
2874 # NOTE: must end in /
2875 logFolder = "/opt/onos/log/"
2876 logFiles = [ "karaf.log", "karaf.log.1" ]
2877 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002878 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002879 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002880 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002881 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2882 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002883 # std*.log's
2884 # NOTE: must end in /
2885 logFolder = "/opt/onos/var/"
2886 logFiles = [ "stderr.log", "stdout.log" ]
2887 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002888 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002889 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002890 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002891 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2892 logFolder + f, dstName )
2893 else:
2894 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002895
2896 main.step( "Stopping Mininet" )
2897 mnResult = main.Mininet1.stopNet()
2898 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2899 onpass="Mininet stopped",
2900 onfail="MN cleanup NOT successful" )
2901
2902 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002903 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002904 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2905 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002906
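# Write timing results to a CSV for plotting; gossipTime and main.restartTime
# are set in earlier test cases, so a NameError here just means those cases did not run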
2907 try:
2908 timerLog = open( main.logdir + "/Timers.csv", 'w')
2909 # Overwrite with empty line and close
2910 labels = "Gossip Intents, Restart"
2911 data = str( gossipTime ) + ", " + str( main.restartTime )
2912 timerLog.write( labels + "\n" + data )
2913 timerLog.close()
2914 except NameError, e:
2915 main.log.exception(e)
2916
2917 def CASE14( self, main ):
2918 """
2919 start election app on all onos nodes
2920 """
Jon Halle1a3b752015-07-22 13:02:46 -07002921 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002922 assert main, "main not defined"
2923 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002924 assert main.CLIs, "main.CLIs not defined"
2925 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002926
2927 main.case( "Start Leadership Election app" )
2928 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002929 onosCli = main.CLIs[ main.activeNodes[0] ]
2930 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002931 utilities.assert_equals(
2932 expect=main.TRUE,
2933 actual=appResult,
2934 onpass="Election app installed",
2935 onfail="Something went wrong with installing Leadership election" )
2936
2937 main.step( "Run for election on each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002938 for i in main.activeNodes:
2939 main.CLIs[i].electionTestRun()
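# Give the election results a few seconds to propagate before comparing leaderboards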
Jon Hall25463a82016-04-13 14:03:52 -07002940 time.sleep(5)
2941 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2942 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall5cf14d52015-07-16 12:15:19 -07002943 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002944 expect=True,
2945 actual=sameResult,
2946 onpass="All nodes see the same leaderboards",
2947 onfail="Inconsistent leaderboards" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002948
Jon Hall25463a82016-04-13 14:03:52 -07002949 if sameResult:
2950 leader = leaders[ 0 ][ 0 ]
2951 if main.nodes[main.activeNodes[0]].ip_address in leader:
2952 correctLeader = True
2953 else:
2954 correctLeader = False
2955 main.step( "First node was elected leader" )
2956 utilities.assert_equals(
2957 expect=True,
2958 actual=correctLeader,
2959 onpass="Correct leader was elected",
2960 onfail="Incorrect leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002961
2962 def CASE15( self, main ):
2963 """
2964 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002965 15.1 Run election on each node
2966 15.2 Check that each node has the same leaders and candidates
2967 15.3 Find current leader and withdraw
2968 15.4 Check that a new node was elected leader
2969 15.5 Check that the new leader was the candidate of the old leader
2970 15.6 Run for election on old leader
2971 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2972 15.8 Make sure that the old leader was added to the candidate list
2973
2974 old and new variable prefixes refer to data from before vs after
2975 the withdrawal, and later to before vs after the re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002976 """
2977 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002978 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002979 assert main, "main not defined"
2980 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002981 assert main.CLIs, "main.CLIs not defined"
2982 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002983
Jon Hall5cf14d52015-07-16 12:15:19 -07002984 description = "Check that Leadership Election is still functional"
2985 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002986 # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002987
Jon Halla440e872016-03-31 15:15:50 -07002988 oldLeaders = []  # list of lists of each node's candidates before
2989 newLeaders = []  # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002990 oldLeader = ''  # the old leader from oldLeaders, None if not same
2991 newLeader = ''  # the new leader from newLeaders, None if not same
2992 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2993 expectNoLeader = False # True when there is only one leader
2994 if main.numCtrls == 1:
2995 expectNoLeader = True
2996
2997 main.step( "Run for election on each node" )
2998 electionResult = main.TRUE
2999
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003000 for i in main.activeNodes: # run test election on each node
3001 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07003002 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003003 utilities.assert_equals(
3004 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003005 actual=electionResult,
3006 onpass="All nodes successfully ran for leadership",
3007 onfail="At least one node failed to run for leadership" )
3008
acsmars3a72bde2015-09-02 14:16:22 -07003009 if electionResult == main.FALSE:
3010 main.log.error(
3011 "Skipping Test Case because Election Test App isn't loaded" )
3012 main.skipCase()
3013
acsmars71adceb2015-08-31 15:09:26 -07003014 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07003015 failMessage = "Nodes have different leaderboards"
Jon Halla440e872016-03-31 15:15:50 -07003016 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
Jon Hall41d39f12016-04-11 22:54:35 -07003017 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Halla440e872016-03-31 15:15:50 -07003018 if sameResult:
3019 oldLeader = oldLeaders[ 0 ][ 0 ]
3020 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07003021 else:
Jon Halla440e872016-03-31 15:15:50 -07003022 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07003023 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003024 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003025 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07003026 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07003027 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07003028
3029 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07003030 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07003031 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07003032 if oldLeader is None:
3033 main.log.error( "Leadership isn't consistent." )
3034 withdrawResult = main.FALSE
3035 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003036 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07003037 if oldLeader == main.nodes[ i ].ip_address:
3038 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003039 break
3040 else: # FOR/ELSE statement
3041 main.log.error( "Leader election, could not find current leader" )
3042 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07003043 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07003044 utilities.assert_equals(
3045 expect=main.TRUE,
3046 actual=withdrawResult,
3047 onpass="Node was withdrawn from election",
3048 onfail="Node was not withdrawn from election" )
3049
acsmars71adceb2015-08-31 15:09:26 -07003050 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07003051 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07003052 # Get new leaders and candidates
Jon Hall41d39f12016-04-11 22:54:35 -07003053 newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall3a7843a2016-04-12 03:01:09 -07003054 newLeader = None
Jon Halla440e872016-03-31 15:15:50 -07003055 if newLeaderResult:
Jon Hall3a7843a2016-04-12 03:01:09 -07003056 if newLeaders[ 0 ][ 0 ] == 'none':
3057 main.log.error( "No leader was elected on at least 1 node" )
3058 if not expectNoLeader:
3059 newLeaderResult = False
Jon Hall25463a82016-04-13 14:03:52 -07003060 newLeader = newLeaders[ 0 ][ 0 ]
acsmars71adceb2015-08-31 15:09:26 -07003061
3062 # Check that the new leader is not the older leader, which was withdrawn
3063 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003064 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003065 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07003066 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003067 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003068 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003069 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003070 onpass="Leadership election passed",
3071 onfail="Something went wrong with Leadership election" )
3072
Jon Halla440e872016-03-31 15:15:50 -07003073 main.step( "Check that the new leader was the candidate of the old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003074 # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07003075 correctCandidateResult = main.TRUE
3076 if expectNoLeader:
3077 if newLeader == 'none':
3078 main.log.info( "No leader expected. None found. Pass" )
3079 correctCandidateResult = main.TRUE
3080 else:
3081 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3082 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003083 elif len( oldLeaders[0] ) >= 3:
3084 if newLeader == oldLeaders[ 0 ][ 2 ]:
3085 # correct leader was elected
3086 correctCandidateResult = main.TRUE
3087 else:
3088 correctCandidateResult = main.FALSE
3089 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3090 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003091 else:
3092 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003093 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003094 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003095 utilities.assert_equals(
3096 expect=main.TRUE,
3097 actual=correctCandidateResult,
3098 onpass="Correct Candidate Elected",
3099 onfail="Incorrect Candidate Elected" )
3100
Jon Hall5cf14d52015-07-16 12:15:19 -07003101 main.step( "Run for election on old leader( just so everyone " +
3102 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003103 if oldLeaderCLI is not None:
3104 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003105 else:
acsmars71adceb2015-08-31 15:09:26 -07003106 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003107 runResult = main.FALSE
3108 utilities.assert_equals(
3109 expect=main.TRUE,
3110 actual=runResult,
3111 onpass="App re-ran for election",
3112 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003113
acsmars71adceb2015-08-31 15:09:26 -07003114 main.step(
3115 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003116 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003117 # Get new leaders and candidates
3118 reRunLeaders = []
3119 time.sleep( 5 )  # TODO: Parameterize this sleep
Jon Hall41d39f12016-04-11 22:54:35 -07003120 positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003121
3122 # Check that the re-elected node is last on the candidate List
Jon Hall3a7843a2016-04-12 03:01:09 -07003123 if not reRunLeaders[0]:
3124 positionResult = main.FALSE
3125 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
Jon Halla440e872016-03-31 15:15:50 -07003126 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
3127 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003128 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003129 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003130 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003131 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003132 onpass="Old leader successfully re-ran for election",
3133 onfail="Something went wrong with Leadership election after " +
3134 "the old leader re-ran for election" )
3135
3136 def CASE16( self, main ):
3137 """
3138 Install Distributed Primitives app
3139 """
3140 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003141 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003142 assert main, "main not defined"
3143 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003144 assert main.CLIs, "main.CLIs not defined"
3145 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003146
3147 # Variables for the distributed primitives tests
3148 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003149 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003150 global onosSet
3151 global onosSetName
3152 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003153 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003154 onosSet = set([])
3155 onosSetName = "TestON-set"
3156
3157 description = "Install Primitives app"
3158 main.case( description )
3159 main.step( "Install Primitives app" )
3160 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003161 node = main.activeNodes[0]
3162 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003163 utilities.assert_equals( expect=main.TRUE,
3164 actual=appResults,
3165 onpass="Primitives app activated",
3166 onfail="Primitives app not activated" )
3167 time.sleep( 5 ) # To allow all nodes to activate
3168
3169 def CASE17( self, main ):
3170 """
3171 Check for basic functionality with distributed primitives
3172 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003173 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003174 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003175 assert main, "main not defined"
3176 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003177 assert main.CLIs, "main.CLIs not defined"
3178 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003179 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003180 assert onosSetName, "onosSetName not defined"
3181 # NOTE: assert fails if value is 0/None/Empty/False
3182 try:
3183 pCounterValue
3184 except NameError:
3185 main.log.error( "pCounterValue not defined, setting to 0" )
3186 pCounterValue = 0
3187 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003188 onosSet
3189 except NameError:
3190 main.log.error( "onosSet not defined, setting to empty Set" )
3191 onosSet = set([])
3192 # Variables for the distributed primitives tests. These are local only
3193 addValue = "a"
3194 addAllValue = "a b c d e f"
3195 retainValue = "c d e f"
3196
3197 description = "Check for basic functionality with distributed " +\
3198 "primitives"
3199 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003200 main.caseExplanation = "Test the methods of the distributed " +\
3201 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003202 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003203 # Partitioned counters
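# Pattern for the counter steps below: each active node updates the shared
# counter in its own thread, the returned values are collected, and every
# expected intermediate value must appear somewhere in the responses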
3204 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003205 pCounters = []
3206 threads = []
3207 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003208 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003209 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3210 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003211 args=[ pCounterName ] )
3212 pCounterValue += 1
3213 addedPValues.append( pCounterValue )
3214 threads.append( t )
3215 t.start()
3216
3217 for t in threads:
3218 t.join()
3219 pCounters.append( t.result )
3220 # Check that counter incremented numController times
3221 pCounterResults = True
3222 for i in addedPValues:
3223 tmpResult = i in pCounters
3224 pCounterResults = pCounterResults and tmpResult
3225 if not tmpResult:
3226 main.log.error( str( i ) + " is not in partitioned "
3227 "counter incremented results" )
3228 utilities.assert_equals( expect=True,
3229 actual=pCounterResults,
3230 onpass="Default counter incremented",
3231 onfail="Error incrementing default" +
3232 " counter" )
3233
Jon Halle1a3b752015-07-22 13:02:46 -07003234 main.step( "Get then Increment a default counter on each node" )
3235 pCounters = []
3236 threads = []
3237 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003238 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003239 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3240 name="counterGetAndAdd-" + str( i ),
3241 args=[ pCounterName ] )
3242 addedPValues.append( pCounterValue )
3243 pCounterValue += 1
3244 threads.append( t )
3245 t.start()
3246
3247 for t in threads:
3248 t.join()
3249 pCounters.append( t.result )
3250 # Check that counter incremented numController times
3251 pCounterResults = True
3252 for i in addedPValues:
3253 tmpResult = i in pCounters
3254 pCounterResults = pCounterResults and tmpResult
3255 if not tmpResult:
3256 main.log.error( str( i ) + " is not in partitioned "
3257 "counter incremented results" )
3258 utilities.assert_equals( expect=True,
3259 actual=pCounterResults,
3260 onpass="Default counter incremented",
3261 onfail="Error incrementing default" +
3262 " counter" )
3263
3264 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003265 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003266 utilities.assert_equals( expect=main.TRUE,
3267 actual=incrementCheck,
3268 onpass="Added counters are correct",
3269 onfail="Added counters are incorrect" )
3270
3271 main.step( "Add -8 to then get a default counter on each node" )
3272 pCounters = []
3273 threads = []
3274 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003275 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003276 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3277 name="counterIncrement-" + str( i ),
3278 args=[ pCounterName ],
3279 kwargs={ "delta": -8 } )
3280 pCounterValue += -8
3281 addedPValues.append( pCounterValue )
3282 threads.append( t )
3283 t.start()
3284
3285 for t in threads:
3286 t.join()
3287 pCounters.append( t.result )
3288 # Check that counter incremented numController times
3289 pCounterResults = True
3290 for i in addedPValues:
3291 tmpResult = i in pCounters
3292 pCounterResults = pCounterResults and tmpResult
3293 if not tmpResult:
3294 main.log.error( str( i ) + " is not in partitioned "
3295 "counter incremented results" )
3296 utilities.assert_equals( expect=True,
3297 actual=pCounterResults,
3298 onpass="Default counter incremented",
3299 onfail="Error incrementing default" +
3300 " counter" )
3301
3302 main.step( "Add 5 to then get a default counter on each node" )
3303 pCounters = []
3304 threads = []
3305 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003306 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003307 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3308 name="counterIncrement-" + str( i ),
3309 args=[ pCounterName ],
3310 kwargs={ "delta": 5 } )
3311 pCounterValue += 5
3312 addedPValues.append( pCounterValue )
3313 threads.append( t )
3314 t.start()
3315
3316 for t in threads:
3317 t.join()
3318 pCounters.append( t.result )
3319 # Check that counter incremented numController times
3320 pCounterResults = True
3321 for i in addedPValues:
3322 tmpResult = i in pCounters
3323 pCounterResults = pCounterResults and tmpResult
3324 if not tmpResult:
3325 main.log.error( str( i ) + " is not in partitioned "
3326 "counter incremented results" )
3327 utilities.assert_equals( expect=True,
3328 actual=pCounterResults,
3329 onpass="Default counter incremented",
3330 onfail="Error incrementing default" +
3331 " counter" )
3332
3333 main.step( "Get then add 5 to a default counter on each node" )
3334 pCounters = []
3335 threads = []
3336 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003337 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003338 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3339 name="counterIncrement-" + str( i ),
3340 args=[ pCounterName ],
3341 kwargs={ "delta": 5 } )
3342 addedPValues.append( pCounterValue )
3343 pCounterValue += 5
3344 threads.append( t )
3345 t.start()
3346
3347 for t in threads:
3348 t.join()
3349 pCounters.append( t.result )
3350 # Check that counter incremented numController times
3351 pCounterResults = True
3352 for i in addedPValues:
3353 tmpResult = i in pCounters
3354 pCounterResults = pCounterResults and tmpResult
3355 if not tmpResult:
3356 main.log.error( str( i ) + " is not in partitioned "
3357 "counter incremented results" )
3358 utilities.assert_equals( expect=True,
3359 actual=pCounterResults,
3360 onpass="Default counter incremented",
3361 onfail="Error incrementing default" +
3362 " counter" )
3363
3364 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003365 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Halle1a3b752015-07-22 13:02:46 -07003366 utilities.assert_equals( expect=main.TRUE,
3367 actual=incrementCheck,
3368 onpass="Added counters are correct",
3369 onfail="Added counters are incorrect" )
3370
Jon Hall5cf14d52015-07-16 12:15:19 -07003371 # DISTRIBUTED SETS
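# onosSet is a local Python set mirroring the expected contents of the
# distributed set; after each operation every node's reported contents and
# size are compared against it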
3372 main.step( "Distributed Set get" )
3373 size = len( onosSet )
3374 getResponses = []
3375 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003376 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003377 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003378 name="setTestGet-" + str( i ),
3379 args=[ onosSetName ] )
3380 threads.append( t )
3381 t.start()
3382 for t in threads:
3383 t.join()
3384 getResponses.append( t.result )
3385
3386 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003387 for i in range( len( main.activeNodes ) ):
3388 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003389 if isinstance( getResponses[ i ], list):
3390 current = set( getResponses[ i ] )
3391 if len( current ) == len( getResponses[ i ] ):
3392 # no repeats
3393 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003394 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003395 " has incorrect view" +
3396 " of set " + onosSetName + ":\n" +
3397 str( getResponses[ i ] ) )
3398 main.log.debug( "Expected: " + str( onosSet ) )
3399 main.log.debug( "Actual: " + str( current ) )
3400 getResults = main.FALSE
3401 else:
3402 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003403 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003404 " has repeat elements in" +
3405 " set " + onosSetName + ":\n" +
3406 str( getResponses[ i ] ) )
3407 getResults = main.FALSE
3408 elif getResponses[ i ] == main.ERROR:
3409 getResults = main.FALSE
3410 utilities.assert_equals( expect=main.TRUE,
3411 actual=getResults,
3412 onpass="Set elements are correct",
3413 onfail="Set elements are incorrect" )
3414
3415 main.step( "Distributed Set size" )
3416 sizeResponses = []
3417 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003418 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003419 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003420 name="setTestSize-" + str( i ),
3421 args=[ onosSetName ] )
3422 threads.append( t )
3423 t.start()
3424 for t in threads:
3425 t.join()
3426 sizeResponses.append( t.result )
3427
3428 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003429 for i in range( len( main.activeNodes ) ):
3430 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003431 if size != sizeResponses[ i ]:
3432 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003433 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003434 " expected a size of " + str( size ) +
3435 " for set " + onosSetName +
3436 " but got " + str( sizeResponses[ i ] ) )
3437 utilities.assert_equals( expect=main.TRUE,
3438 actual=sizeResults,
3439 onpass="Set sizes are correct",
3440 onfail="Set sizes are incorrect" )
3441
3442 main.step( "Distributed Set add()" )
3443 onosSet.add( addValue )
3444 addResponses = []
3445 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003446 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003447 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003448 name="setTestAdd-" + str( i ),
3449 args=[ onosSetName, addValue ] )
3450 threads.append( t )
3451 t.start()
3452 for t in threads:
3453 t.join()
3454 addResponses.append( t.result )
3455
3456 # main.TRUE = successfully changed the set
3457 # main.FALSE = action resulted in no change in set
3458 # main.ERROR - Some error in executing the function
3459 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003460 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003461 if addResponses[ i ] == main.TRUE:
3462 # All is well
3463 pass
3464 elif addResponses[ i ] == main.FALSE:
3465 # Already in set, probably fine
3466 pass
3467 elif addResponses[ i ] == main.ERROR:
3468 # Error in execution
3469 addResults = main.FALSE
3470 else:
3471 # unexpected result
3472 addResults = main.FALSE
3473 if addResults != main.TRUE:
3474 main.log.error( "Error executing set add" )
3475
3476 # Check if set is still correct
3477 size = len( onosSet )
3478 getResponses = []
3479 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003480 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003481 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003482 name="setTestGet-" + str( i ),
3483 args=[ onosSetName ] )
3484 threads.append( t )
3485 t.start()
3486 for t in threads:
3487 t.join()
3488 getResponses.append( t.result )
3489 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003490 for i in range( len( main.activeNodes ) ):
3491 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003492 if isinstance( getResponses[ i ], list):
3493 current = set( getResponses[ i ] )
3494 if len( current ) == len( getResponses[ i ] ):
3495 # no repeats
3496 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003497 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003498 " of set " + onosSetName + ":\n" +
3499 str( getResponses[ i ] ) )
3500 main.log.debug( "Expected: " + str( onosSet ) )
3501 main.log.debug( "Actual: " + str( current ) )
3502 getResults = main.FALSE
3503 else:
3504 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003505 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003506 " set " + onosSetName + ":\n" +
3507 str( getResponses[ i ] ) )
3508 getResults = main.FALSE
3509 elif getResponses[ i ] == main.ERROR:
3510 getResults = main.FALSE
3511 sizeResponses = []
3512 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003513 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003514 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003515 name="setTestSize-" + str( i ),
3516 args=[ onosSetName ] )
3517 threads.append( t )
3518 t.start()
3519 for t in threads:
3520 t.join()
3521 sizeResponses.append( t.result )
3522 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003523 for i in range( len( main.activeNodes ) ):
3524 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003525 if size != sizeResponses[ i ]:
3526 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003527 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003528 " expected a size of " + str( size ) +
3529 " for set " + onosSetName +
3530 " but got " + str( sizeResponses[ i ] ) )
3531 addResults = addResults and getResults and sizeResults
3532 utilities.assert_equals( expect=main.TRUE,
3533 actual=addResults,
3534 onpass="Set add correct",
3535 onfail="Set add was incorrect" )
3536
3537 main.step( "Distributed Set addAll()" )
3538 onosSet.update( addAllValue.split() )
3539 addResponses = []
3540 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003541 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003542 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003543 name="setTestAddAll-" + str( i ),
3544 args=[ onosSetName, addAllValue ] )
3545 threads.append( t )
3546 t.start()
3547 for t in threads:
3548 t.join()
3549 addResponses.append( t.result )
3550
3551 # main.TRUE = successfully changed the set
3552 # main.FALSE = action resulted in no change in set
3553 # main.ERROR - Some error in executing the function
3554 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003555 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003556 if addResponses[ i ] == main.TRUE:
3557 # All is well
3558 pass
3559 elif addResponses[ i ] == main.FALSE:
3560 # Already in set, probably fine
3561 pass
3562 elif addResponses[ i ] == main.ERROR:
3563 # Error in execution
3564 addAllResults = main.FALSE
3565 else:
3566 # unexpected result
3567 addAllResults = main.FALSE
3568 if addAllResults != main.TRUE:
3569 main.log.error( "Error executing set addAll" )
3570
3571 # Check if set is still correct
3572 size = len( onosSet )
3573 getResponses = []
3574 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003575 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003576 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003577 name="setTestGet-" + str( i ),
3578 args=[ onosSetName ] )
3579 threads.append( t )
3580 t.start()
3581 for t in threads:
3582 t.join()
3583 getResponses.append( t.result )
3584 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003585 for i in range( len( main.activeNodes ) ):
3586 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003587 if isinstance( getResponses[ i ], list):
3588 current = set( getResponses[ i ] )
3589 if len( current ) == len( getResponses[ i ] ):
3590 # no repeats
3591 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003592 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003593 " has incorrect view" +
3594 " of set " + onosSetName + ":\n" +
3595 str( getResponses[ i ] ) )
3596 main.log.debug( "Expected: " + str( onosSet ) )
3597 main.log.debug( "Actual: " + str( current ) )
3598 getResults = main.FALSE
3599 else:
3600 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003601 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003602 " has repeat elements in" +
3603 " set " + onosSetName + ":\n" +
3604 str( getResponses[ i ] ) )
3605 getResults = main.FALSE
3606 elif getResponses[ i ] == main.ERROR:
3607 getResults = main.FALSE
3608 sizeResponses = []
3609 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003610 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003611 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003612 name="setTestSize-" + str( i ),
3613 args=[ onosSetName ] )
3614 threads.append( t )
3615 t.start()
3616 for t in threads:
3617 t.join()
3618 sizeResponses.append( t.result )
3619 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003620 for i in range( len( main.activeNodes ) ):
3621 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003622 if size != sizeResponses[ i ]:
3623 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003624 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003625 " expected a size of " + str( size ) +
3626 " for set " + onosSetName +
3627 " but got " + str( sizeResponses[ i ] ) )
3628 addAllResults = addAllResults and getResults and sizeResults
3629 utilities.assert_equals( expect=main.TRUE,
3630 actual=addAllResults,
3631 onpass="Set addAll correct",
3632 onfail="Set addAll was incorrect" )
3633
3634 main.step( "Distributed Set contains()" )
3635 containsResponses = []
3636 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003637 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003638 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003639 name="setContains-" + str( i ),
3640 args=[ onosSetName ],
3641 kwargs={ "values": addValue } )
3642 threads.append( t )
3643 t.start()
3644 for t in threads:
3645 t.join()
3646 # NOTE: This is the tuple
3647 containsResponses.append( t.result )
3648
3649 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003650 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003651 if containsResponses[ i ] == main.ERROR:
3652 containsResults = main.FALSE
3653 else:
3654 containsResults = containsResults and\
3655 containsResponses[ i ][ 1 ]
3656 utilities.assert_equals( expect=main.TRUE,
3657 actual=containsResults,
3658 onpass="Set contains is functional",
3659 onfail="Set contains failed" )
3660
3661 main.step( "Distributed Set containsAll()" )
3662 containsAllResponses = []
3663 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003664 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003665 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003666 name="setContainsAll-" + str( i ),
3667 args=[ onosSetName ],
3668 kwargs={ "values": addAllValue } )
3669 threads.append( t )
3670 t.start()
3671 for t in threads:
3672 t.join()
3673 # NOTE: This is the tuple
3674 containsAllResponses.append( t.result )
3675
3676 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003677 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003678 if containsAllResponses[ i ] == main.ERROR:
3679 containsAllResults = main.FALSE
3680 else:
3681 containsAllResults = containsAllResults and\
3682 containsAllResponses[ i ][ 1 ]
3683 utilities.assert_equals( expect=main.TRUE,
3684 actual=containsAllResults,
3685 onpass="Set containsAll is functional",
3686 onfail="Set containsAll failed" )
3687
3688 main.step( "Distributed Set remove()" )
3689 onosSet.remove( addValue )
3690 removeResponses = []
3691 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003692 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003693 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003694 name="setTestRemove-" + str( i ),
3695 args=[ onosSetName, addValue ] )
3696 threads.append( t )
3697 t.start()
3698 for t in threads:
3699 t.join()
3700 removeResponses.append( t.result )
3701
3702 # main.TRUE = successfully changed the set
3703 # main.FALSE = action resulted in no change in set
3704 # main.ERROR - Some error in executing the function
3705 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003706 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003707 if removeResponses[ i ] == main.TRUE:
3708 # All is well
3709 pass
3710 elif removeResponses[ i ] == main.FALSE:
3711 # not in set, probably fine
3712 pass
3713 elif removeResponses[ i ] == main.ERROR:
3714 # Error in execution
3715 removeResults = main.FALSE
3716 else:
3717 # unexpected result
3718 removeResults = main.FALSE
3719 if removeResults != main.TRUE:
3720 main.log.error( "Error executing set remove" )
3721
3722 # Check if set is still correct
3723 size = len( onosSet )
3724 getResponses = []
3725 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003726 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003727 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003728 name="setTestGet-" + str( i ),
3729 args=[ onosSetName ] )
3730 threads.append( t )
3731 t.start()
3732 for t in threads:
3733 t.join()
3734 getResponses.append( t.result )
3735 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003736 for i in range( len( main.activeNodes ) ):
3737 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003738 if isinstance( getResponses[ i ], list):
3739 current = set( getResponses[ i ] )
3740 if len( current ) == len( getResponses[ i ] ):
3741 # no repeats
3742 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003743 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003744 " has incorrect view" +
3745 " of set " + onosSetName + ":\n" +
3746 str( getResponses[ i ] ) )
3747 main.log.debug( "Expected: " + str( onosSet ) )
3748 main.log.debug( "Actual: " + str( current ) )
3749 getResults = main.FALSE
3750 else:
3751 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003752 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003753 " has repeat elements in" +
3754 " set " + onosSetName + ":\n" +
3755 str( getResponses[ i ] ) )
3756 getResults = main.FALSE
3757 elif getResponses[ i ] == main.ERROR:
3758 getResults = main.FALSE
3759 sizeResponses = []
3760 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003761 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003762 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003763 name="setTestSize-" + str( i ),
3764 args=[ onosSetName ] )
3765 threads.append( t )
3766 t.start()
3767 for t in threads:
3768 t.join()
3769 sizeResponses.append( t.result )
3770 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003771 for i in range( len( main.activeNodes ) ):
3772 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003773 if size != sizeResponses[ i ]:
3774 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003775 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003776 " expected a size of " + str( size ) +
3777 " for set " + onosSetName +
3778 " but got " + str( sizeResponses[ i ] ) )
3779 removeResults = removeResults and getResults and sizeResults
3780 utilities.assert_equals( expect=main.TRUE,
3781 actual=removeResults,
3782 onpass="Set remove correct",
3783 onfail="Set remove was incorrect" )
3784
3785 main.step( "Distributed Set removeAll()" )
3786 onosSet.difference_update( addAllValue.split() )
3787 removeAllResponses = []
3788 threads = []
3789 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003790 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003791 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003792 name="setTestRemoveAll-" + str( i ),
3793 args=[ onosSetName, addAllValue ] )
3794 threads.append( t )
3795 t.start()
3796 for t in threads:
3797 t.join()
3798 removeAllResponses.append( t.result )
3799 except Exception, e:
3800 main.log.exception(e)
3801
3802 # main.TRUE = successfully changed the set
3803 # main.FALSE = action resulted in no change in set
3804 # main.ERROR - Some error in executing the function
3805 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003806 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003807 if removeAllResponses[ i ] == main.TRUE:
3808 # All is well
3809 pass
3810 elif removeAllResponses[ i ] == main.FALSE:
3811 # not in set, probably fine
3812 pass
3813 elif removeAllResponses[ i ] == main.ERROR:
3814 # Error in execution
3815 removeAllResults = main.FALSE
3816 else:
3817 # unexpected result
3818 removeAllResults = main.FALSE
3819 if removeAllResults != main.TRUE:
3820 main.log.error( "Error executing set removeAll" )
3821
3822 # Check if set is still correct
3823 size = len( onosSet )
3824 getResponses = []
3825 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003826 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003827 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003828 name="setTestGet-" + str( i ),
3829 args=[ onosSetName ] )
3830 threads.append( t )
3831 t.start()
3832 for t in threads:
3833 t.join()
3834 getResponses.append( t.result )
3835 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003836 for i in range( len( main.activeNodes ) ):
3837 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003838 if isinstance( getResponses[ i ], list):
3839 current = set( getResponses[ i ] )
3840 if len( current ) == len( getResponses[ i ] ):
3841 # no repeats
3842 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003843 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003844 " has incorrect view" +
3845 " of set " + onosSetName + ":\n" +
3846 str( getResponses[ i ] ) )
3847 main.log.debug( "Expected: " + str( onosSet ) )
3848 main.log.debug( "Actual: " + str( current ) )
3849 getResults = main.FALSE
3850 else:
3851 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003852 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003853 " has repeat elements in" +
3854 " set " + onosSetName + ":\n" +
3855 str( getResponses[ i ] ) )
3856 getResults = main.FALSE
3857 elif getResponses[ i ] == main.ERROR:
3858 getResults = main.FALSE
3859 sizeResponses = []
3860 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003861 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003862 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003863 name="setTestSize-" + str( i ),
3864 args=[ onosSetName ] )
3865 threads.append( t )
3866 t.start()
3867 for t in threads:
3868 t.join()
3869 sizeResponses.append( t.result )
3870 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003871 for i in range( len( main.activeNodes ) ):
3872 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003873 if size != sizeResponses[ i ]:
3874 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003875 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003876 " expected a size of " + str( size ) +
3877 " for set " + onosSetName +
3878 " but got " + str( sizeResponses[ i ] ) )
3879 removeAllResults = removeAllResults and getResults and sizeResults
3880 utilities.assert_equals( expect=main.TRUE,
3881 actual=removeAllResults,
3882 onpass="Set removeAll correct",
3883 onfail="Set removeAll was incorrect" )
3884
3885 main.step( "Distributed Set addAll()" )
3886 onosSet.update( addAllValue.split() )
3887 addResponses = []
3888 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003889 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003890 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003891 name="setTestAddAll-" + str( i ),
3892 args=[ onosSetName, addAllValue ] )
3893 threads.append( t )
3894 t.start()
3895 for t in threads:
3896 t.join()
3897 addResponses.append( t.result )
3898
3899 # main.TRUE = successfully changed the set
3900 # main.FALSE = action resulted in no change in set
3901 # main.ERROR - Some error in executing the function
3902 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003903 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003904 if addResponses[ i ] == main.TRUE:
3905 # All is well
3906 pass
3907 elif addResponses[ i ] == main.FALSE:
3908 # Already in set, probably fine
3909 pass
3910 elif addResponses[ i ] == main.ERROR:
3911 # Error in execution
3912 addAllResults = main.FALSE
3913 else:
3914 # unexpected result
3915 addAllResults = main.FALSE
3916 if addAllResults != main.TRUE:
3917 main.log.error( "Error executing set addAll" )
3918
3919 # Check if set is still correct
3920 size = len( onosSet )
3921 getResponses = []
3922 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003923 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003924 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003925 name="setTestGet-" + str( i ),
3926 args=[ onosSetName ] )
3927 threads.append( t )
3928 t.start()
3929 for t in threads:
3930 t.join()
3931 getResponses.append( t.result )
3932 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003933 for i in range( len( main.activeNodes ) ):
3934 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003935 if isinstance( getResponses[ i ], list):
3936 current = set( getResponses[ i ] )
3937 if len( current ) == len( getResponses[ i ] ):
3938 # no repeats
3939 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003940 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003941 " has incorrect view" +
3942 " of set " + onosSetName + ":\n" +
3943 str( getResponses[ i ] ) )
3944 main.log.debug( "Expected: " + str( onosSet ) )
3945 main.log.debug( "Actual: " + str( current ) )
3946 getResults = main.FALSE
3947 else:
3948 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003949 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003950 " has repeat elements in" +
3951 " set " + onosSetName + ":\n" +
3952 str( getResponses[ i ] ) )
3953 getResults = main.FALSE
3954 elif getResponses[ i ] == main.ERROR:
3955 getResults = main.FALSE
3956 sizeResponses = []
3957 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003958 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003959 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003960 name="setTestSize-" + str( i ),
3961 args=[ onosSetName ] )
3962 threads.append( t )
3963 t.start()
3964 for t in threads:
3965 t.join()
3966 sizeResponses.append( t.result )
3967 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003968 for i in range( len( main.activeNodes ) ):
3969 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003970 if size != sizeResponses[ i ]:
3971 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003972 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003973 " expected a size of " + str( size ) +
3974 " for set " + onosSetName +
3975 " but got " + str( sizeResponses[ i ] ) )
3976 addAllResults = addAllResults and getResults and sizeResults
3977 utilities.assert_equals( expect=main.TRUE,
3978 actual=addAllResults,
3979 onpass="Set addAll correct",
3980 onfail="Set addAll was incorrect" )
3981
3982 main.step( "Distributed Set clear()" )
3983 onosSet.clear()
3984 clearResponses = []
3985 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003986 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003987 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003988 name="setTestClear-" + str( i ),
3989 args=[ onosSetName, " "], # value doesn't matter when clearing
3990 kwargs={ "clear": True } )
3991 threads.append( t )
3992 t.start()
3993 for t in threads:
3994 t.join()
3995 clearResponses.append( t.result )
3996
3997 # main.TRUE = successfully changed the set
3998 # main.FALSE = action resulted in no change in set
3999 # main.ERROR - Some error in executing the function
4000 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004001 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004002 if clearResponses[ i ] == main.TRUE:
4003 # All is well
4004 pass
4005 elif clearResponses[ i ] == main.FALSE:
4006 # Nothing set, probably fine
4007 pass
4008 elif clearResponses[ i ] == main.ERROR:
4009 # Error in execution
4010 clearResults = main.FALSE
4011 else:
4012 # unexpected result
4013 clearResults = main.FALSE
4014 if clearResults != main.TRUE:
4015 main.log.error( "Error executing set clear" )
4016
4017 # Check if set is still correct
4018 size = len( onosSet )
4019 getResponses = []
4020 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004021 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004022 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004023 name="setTestGet-" + str( i ),
4024 args=[ onosSetName ] )
4025 threads.append( t )
4026 t.start()
4027 for t in threads:
4028 t.join()
4029 getResponses.append( t.result )
4030 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004031 for i in range( len( main.activeNodes ) ):
4032 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004033 if isinstance( getResponses[ i ], list):
4034 current = set( getResponses[ i ] )
4035 if len( current ) == len( getResponses[ i ] ):
4036 # no repeats
4037 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004038 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004039 " has incorrect view" +
4040 " of set " + onosSetName + ":\n" +
4041 str( getResponses[ i ] ) )
4042 main.log.debug( "Expected: " + str( onosSet ) )
4043 main.log.debug( "Actual: " + str( current ) )
4044 getResults = main.FALSE
4045 else:
4046 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004047 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004048 " has repeat elements in" +
4049 " set " + onosSetName + ":\n" +
4050 str( getResponses[ i ] ) )
4051 getResults = main.FALSE
4052 elif getResponses[ i ] == main.ERROR:
4053 getResults = main.FALSE
4054 sizeResponses = []
4055 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004056 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004057 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004058 name="setTestSize-" + str( i ),
4059 args=[ onosSetName ] )
4060 threads.append( t )
4061 t.start()
4062 for t in threads:
4063 t.join()
4064 sizeResponses.append( t.result )
4065 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004066 for i in range( len( main.activeNodes ) ):
4067 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004068 if size != sizeResponses[ i ]:
4069 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004070 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004071 " expected a size of " + str( size ) +
4072 " for set " + onosSetName +
4073 " but got " + str( sizeResponses[ i ] ) )
4074 clearResults = clearResults and getResults and sizeResults
4075 utilities.assert_equals( expect=main.TRUE,
4076 actual=clearResults,
4077 onpass="Set clear correct",
4078 onfail="Set clear was incorrect" )
4079
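        # NOTE: the set is repopulated from addAllValue after the clear above;
        # onosSet.update( addAllValue.split() ) mirrors the same elements locally,
        # so the get/size checks again compare every node against the full set.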
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

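        # NOTE: setTestRemove() with retain=True is expected to behave like a set
        # intersection, keeping only the elements listed in retainValue;
        # onosSet.intersection_update( retainValue.split() ) models the same
        # operation on the local reference set used by the checks below.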
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # No elements removed, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

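        # Assumed response shape ( inferred from the checks below, not verified
        # against the CLI driver ): transactionalMapPut() should return a dict of
        # entries keyed by map key, e.g.
        #     { "Key1": { "value": "Testing" }, "Key2": { "value": "Testing" }, ... }
        # so that len( putResponses ) equals the number of keys written and each
        # entry's 'value' field matches tMapValue.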
        # Transactional maps
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

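        # NOTE: the get step assumes the keys written by transactionalMapPut() are
        # named "Key1" .. "Key" + str( numKeys ); every active node is queried for
        # each key and all returned values must equal tMapValue.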
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )