"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes being killed and restarted

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The failure-inducing case
CASE62: The failure-recovery case
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAkillNodes:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Kill a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Set up the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -070086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
Jon Halle1a3b752015-07-22 13:02:46 -070097 try:
Jon Halla440e872016-03-31 15:15:50 -070098 from tests.HAsanity.dependencies.Counters import Counters
99 main.Counters = Counters()
Jon Halle1a3b752015-07-22 13:02:46 -0700100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700107 ipList = []
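        # Build CLI and node driver handles for each controller defined in the
        # .topo file; stop at the first missing component so the test still
        # works with clusters smaller than numCtrls.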
Jon Halle1a3b752015-07-22 13:02:46 -0700108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
        # FIXME: this is a short-term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700130 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700136 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700178
179 main.step( "Make sure ONOS service doesn't automatically respawn" )
180 handle = main.ONOSbench.handle
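        # Comment out the respawn directive so the init system will not
        # restart ONOS after it is killed later in this test.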
181 handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
182 handle.expect( "\$" ) # $ from the command
183 handle.expect( "\$" ) # $ from the prompt
184
Jon Hall5cf14d52015-07-16 12:15:19 -0700185 # GRAPHS
186 # NOTE: important params here:
187 # job = name of Jenkins job
188 # Plot Name = Plot-HA, only can be used if multiple plots
189 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700190 job = "HAkillNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700191 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700192 index = "2"
Jon Hall5cf14d52015-07-16 12:15:19 -0700193 graphs = '<ac:structured-macro ac:name="html">\n'
194 graphs += '<ac:plain-text-body><![CDATA[\n'
195 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800196 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700197 '&width=500&height=300"' +\
198 'noborder="0" width="500" height="300" scrolling="yes" ' +\
199 'seamless="seamless"></iframe>\n'
200 graphs += ']]></ac:plain-text-body>\n'
201 graphs += '</ac:structured-macro>\n'
202 main.log.wiki(graphs)
203
204 main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
206 # NOTE: this assumes TestON and ONOS are on the same machine
207 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
208 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
209 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
210 main.ONOSbench.ip_address,
211 srcFile,
212 dstDir,
213 pwd=main.ONOSbench.pwd,
214 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700215 packageResult = main.ONOSbench.onosPackage()
216 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
217 onpass="ONOS package successful",
218 onfail="ONOS package failed" )
219
220 main.step( "Installing ONOS package" )
221 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700222 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700223 tmpResult = main.ONOSbench.onosInstall( options="-f",
224 node=node.ip_address )
225 onosInstallResult = onosInstallResult and tmpResult
226 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
227 onpass="ONOS install successful",
228 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700229 # clean up gen-partitions file
230 try:
231 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
232 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
233 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
234 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
235 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
236 str( main.ONOSbench.handle.before ) )
237 except ( pexpect.TIMEOUT, pexpect.EOF ):
238 main.log.exception( "ONOSbench: pexpect exception found:" +
239 main.ONOSbench.handle.before )
240 main.cleanup()
241 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700242
243 main.step( "Checking if ONOS is up yet" )
244 for i in range( 2 ):
245 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700246 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700247 started = main.ONOSbench.isup( node.ip_address )
248 if not started:
Jon Hallc6793552016-01-19 14:18:37 -0800249 main.log.error( node.name + " hasn't started" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700250 onosIsupResult = onosIsupResult and started
251 if onosIsupResult == main.TRUE:
252 break
253 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
254 onpass="ONOS startup successful",
255 onfail="ONOS startup failed" )
256
        main.step( "Starting ONOS CLI sessions" )
258 cliResults = main.TRUE
259 threads = []
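        # Open a CLI session to every node in parallel; the overall result is
        # the logical AND of each thread's result.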
Jon Halle1a3b752015-07-22 13:02:46 -0700260 for i in range( main.numCtrls ):
261 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700262 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700263 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700264 threads.append( t )
265 t.start()
266
267 for t in threads:
268 t.join()
269 cliResults = cliResults and t.result
270 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
271 onpass="ONOS cli startup successful",
272 onfail="ONOS cli startup failed" )
273
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700274 # Create a list of active nodes for use when some nodes are stopped
275 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
276
Jon Hall5cf14d52015-07-16 12:15:19 -0700277 if main.params[ 'tcpdump' ].lower() == "true":
278 main.step( "Start Packet Capture MN" )
279 main.Mininet2.startTcpdump(
280 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
281 + "-MN.pcap",
282 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
283 port=main.params[ 'MNtcpdump' ][ 'port' ] )
284
285 main.step( "App Ids check" )
286 appCheck = main.TRUE
287 threads = []
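        # Check that each node's view of installed apps and their IDs is
        # consistent; a mismatch usually points at an out-of-sync app store.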
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700288 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700289 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700290 name="appToIDCheck-" + str( i ),
291 args=[] )
292 threads.append( t )
293 t.start()
294
295 for t in threads:
296 t.join()
297 appCheck = appCheck and t.result
298 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700299 node = main.activeNodes[0]
300 main.log.warn( main.CLIs[node].apps() )
301 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700302 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
303 onpass="App Ids seem to be correct",
304 onfail="Something is wrong with app Ids" )
305
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700306 main.step( "Clean up ONOS service changes" )
307 handle.sendline( "git checkout -- tools/package/init/onos.conf" )
308 handle.expect( "\$" )
309
Jon Halla440e872016-03-31 15:15:50 -0700310 main.step( "Checking ONOS nodes" )
311 nodesOutput = []
312 nodeResults = main.TRUE
313 threads = []
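        # Query 'nodes' on every active instance in parallel, then check that
        # each view lists all active cluster members in the READY state.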
314 for i in main.activeNodes:
315 t = main.Thread( target=main.CLIs[i].nodes,
316 name="nodes-" + str( i ),
317 args=[ ] )
318 threads.append( t )
319 t.start()
320
321 for t in threads:
322 t.join()
323 nodesOutput.append( t.result )
324 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
325 ips.sort()
326 for i in nodesOutput:
327 try:
328 current = json.loads( i )
329 activeIps = []
330 currentResult = main.FALSE
331 for node in current:
332 if node['state'] == 'READY':
333 activeIps.append( node['ip'] )
334 activeIps.sort()
335 if ips == activeIps:
336 currentResult = main.TRUE
337 except ( ValueError, TypeError ):
338 main.log.error( "Error parsing nodes output" )
339 main.log.warn( repr( i ) )
340 currentResult = main.FALSE
341 nodeResults = nodeResults and currentResult
342 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
343 onpass="Nodes check successful",
344 onfail="Nodes check NOT successful" )
345
346 if not nodeResults:
347 for cli in main.CLIs:
348 main.log.debug( "{} components not ACTIVE: \n{}".format(
349 cli.name,
350 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
351
Jon Hall5cf14d52015-07-16 12:15:19 -0700352 if cliResults == main.FALSE:
353 main.log.error( "Failed to start ONOS, stopping test" )
354 main.cleanup()
355 main.exit()
356
357 def CASE2( self, main ):
358 """
359 Assign devices to controllers
360 """
361 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700362 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700363 assert main, "main not defined"
364 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700365 assert main.CLIs, "main.CLIs not defined"
366 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700367 assert ONOS1Port, "ONOS1Port not defined"
368 assert ONOS2Port, "ONOS2Port not defined"
369 assert ONOS3Port, "ONOS3Port not defined"
370 assert ONOS4Port, "ONOS4Port not defined"
371 assert ONOS5Port, "ONOS5Port not defined"
372 assert ONOS6Port, "ONOS6Port not defined"
373 assert ONOS7Port, "ONOS7Port not defined"
374
375 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700376 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700377 "and check that an ONOS node becomes the " +\
378 "master of the device."
379 main.step( "Assign switches to controllers" )
380
381 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700382 for i in range( main.numCtrls ):
383 ipList.append( main.nodes[ i ].ip_address )
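        # Point every switch (s1-s28) at the full list of controller IPs
        # using ovs-vsctl.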
Jon Hall5cf14d52015-07-16 12:15:19 -0700384 swList = []
385 for i in range( 1, 29 ):
386 swList.append( "s" + str( i ) )
387 main.Mininet1.assignSwController( sw=swList, ip=ipList )
388
389 mastershipCheck = main.TRUE
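        # Verify from Mininet that each switch lists every ONOS node among its
        # configured controllers.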
390 for i in range( 1, 29 ):
391 response = main.Mininet1.getSwController( "s" + str( i ) )
392 try:
393 main.log.info( str( response ) )
394 except Exception:
395 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700396 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700397 if re.search( "tcp:" + node.ip_address, response ):
398 mastershipCheck = mastershipCheck and main.TRUE
399 else:
400 main.log.error( "Error, node " + node.ip_address + " is " +
401 "not in the list of controllers s" +
402 str( i ) + " is connecting to." )
403 mastershipCheck = main.FALSE
404 utilities.assert_equals(
405 expect=main.TRUE,
406 actual=mastershipCheck,
407 onpass="Switch mastership assigned correctly",
408 onfail="Switches not assigned correctly to controllers" )
409
410 def CASE21( self, main ):
411 """
412 Assign mastership to controllers
413 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700414 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700415 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700416 assert main, "main not defined"
417 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700418 assert main.CLIs, "main.CLIs not defined"
419 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700420 assert ONOS1Port, "ONOS1Port not defined"
421 assert ONOS2Port, "ONOS2Port not defined"
422 assert ONOS3Port, "ONOS3Port not defined"
423 assert ONOS4Port, "ONOS4Port not defined"
424 assert ONOS5Port, "ONOS5Port not defined"
425 assert ONOS6Port, "ONOS6Port not defined"
426 assert ONOS7Port, "ONOS7Port not defined"
427
428 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700429 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700430 "device. Then manually assign" +\
431 " mastership to specific ONOS nodes using" +\
432 " 'device-role'"
433 main.step( "Assign mastership of switches to specific controllers" )
434 # Manually assign mastership to the controller we want
435 roleCall = main.TRUE
436
437 ipList = [ ]
438 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700439 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700440 try:
441 # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
443 # cluster
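            # Mapping used below: s1, s28 -> ONOS1; s2, s3 -> ONOS2;
            # s5, s6 -> ONOS3; s4 -> ONOS4; s8-s17 -> ONOS5; s7 -> ONOS6;
            # s18-s27 -> ONOS7. The modulo wraps these assignments for
            # smaller clusters.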
444 for i in range( 1, 29 ): # switches 1 through 28
445 # set up correct variables:
446 if i == 1:
447 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700448 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700449 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700450 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700451 c = 1 % main.numCtrls
452 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700453 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700454 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700455 c = 1 % main.numCtrls
456 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700457 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700458 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700459 c = 3 % main.numCtrls
460 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700461 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700462 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700463 c = 2 % main.numCtrls
464 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700465 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700466 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700467 c = 2 % main.numCtrls
468 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700469 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700470 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700471 c = 5 % main.numCtrls
472 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700473 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700474 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700475 c = 4 % main.numCtrls
476 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700477 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700478 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700479 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700480 c = 6 % main.numCtrls
481 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700482 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700483 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700484 elif i == 28:
485 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700486 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700487 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700488 else:
489 main.log.error( "You didn't write an else statement for " +
490 "switch s" + str( i ) )
491 roleCall = main.FALSE
492 # Assign switch
493 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
494 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700495 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700496 ipList.append( ip )
497 deviceList.append( deviceId )
498 except ( AttributeError, AssertionError ):
499 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700500 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 utilities.assert_equals(
502 expect=main.TRUE,
503 actual=roleCall,
504 onpass="Re-assigned switch mastership to designated controller",
505 onfail="Something wrong with deviceRole calls" )
506
507 main.step( "Check mastership was correctly assigned" )
508 roleCheck = main.TRUE
509 # NOTE: This is due to the fact that device mastership change is not
510 # atomic and is actually a multi step process
511 time.sleep( 5 )
512 for i in range( len( ipList ) ):
513 ip = ipList[i]
514 deviceId = deviceList[i]
515 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700516 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700517 if ip in master:
518 roleCheck = roleCheck and main.TRUE
519 else:
520 roleCheck = roleCheck and main.FALSE
521 main.log.error( "Error, controller " + ip + " is not" +
522 " master " + "of device " +
523 str( deviceId ) + ". Master is " +
524 repr( master ) + "." )
525 utilities.assert_equals(
526 expect=main.TRUE,
527 actual=roleCheck,
528 onpass="Switches were successfully reassigned to designated " +
529 "controller",
530 onfail="Switches were not successfully reassigned" )
531
532 def CASE3( self, main ):
533 """
534 Assign intents
535 """
536 import time
537 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700538 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700539 assert main, "main not defined"
540 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700541 assert main.CLIs, "main.CLIs not defined"
542 assert main.nodes, "main.nodes not defined"
Jon Halla440e872016-03-31 15:15:50 -0700543 try:
544 labels
545 except NameError:
546 main.log.error( "labels not defined, setting to []" )
547 labels = []
548 try:
549 data
550 except NameError:
551 main.log.error( "data not defined, setting to []" )
552 data = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700553 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700554 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700555 "assign predetermined host-to-host intents." +\
556 " After installation, check that the intent" +\
557 " is distributed to all nodes and the state" +\
558 " is INSTALLED"
559
560 # install onos-app-fwd
561 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700562 onosCli = main.CLIs[ main.activeNodes[0] ]
563 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700564 utilities.assert_equals( expect=main.TRUE, actual=installResults,
565 onpass="Install fwd successful",
566 onfail="Install fwd failed" )
567
568 main.step( "Check app ids" )
569 appCheck = main.TRUE
570 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700571 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700572 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700573 name="appToIDCheck-" + str( i ),
574 args=[] )
575 threads.append( t )
576 t.start()
577
578 for t in threads:
579 t.join()
580 appCheck = appCheck and t.result
581 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700582 main.log.warn( onosCli.apps() )
583 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700584 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
585 onpass="App Ids seem to be correct",
586 onfail="Something is wrong with app Ids" )
587
588 main.step( "Discovering Hosts( Via pingall for now )" )
589 # FIXME: Once we have a host discovery mechanism, use that instead
590 # REACTIVE FWD test
591 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700592 passMsg = "Reactive Pingall test passed"
593 time1 = time.time()
594 pingResult = main.Mininet1.pingall()
595 time2 = time.time()
596 if not pingResult:
597 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700598 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700599 passMsg += " on the second try"
600 utilities.assert_equals(
601 expect=main.TRUE,
602 actual=pingResult,
603 onpass= passMsg,
604 onfail="Reactive Pingall failed, " +
605 "one or more ping pairs failed" )
606 main.log.info( "Time for pingall: %2f seconds" %
607 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700608 # timeout for fwd flows
609 time.sleep( 11 )
610 # uninstall onos-app-fwd
611 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700612 node = main.activeNodes[0]
613 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700614 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
615 onpass="Uninstall fwd successful",
616 onfail="Uninstall fwd failed" )
617
618 main.step( "Check app ids" )
619 threads = []
620 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700621 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700622 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700623 name="appToIDCheck-" + str( i ),
624 args=[] )
625 threads.append( t )
626 t.start()
627
628 for t in threads:
629 t.join()
630 appCheck2 = appCheck2 and t.result
631 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700632 node = main.activeNodes[0]
633 main.log.warn( main.CLIs[node].apps() )
634 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700635 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
636 onpass="App Ids seem to be correct",
637 onfail="Something is wrong with app Ids" )
638
639 main.step( "Add host intents via cli" )
640 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700641 # TODO: move the host numbers to params
642 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700643 intentAddResult = True
644 hostResult = main.TRUE
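        # Pair h8..h17 with h18..h27; host MACs follow the pattern
        # 00:00:00:00:00:XX where XX is the host number in hex.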
645 for i in range( 8, 18 ):
646 main.log.info( "Adding host intent between h" + str( i ) +
647 " and h" + str( i + 10 ) )
648 host1 = "00:00:00:00:00:" + \
649 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
650 host2 = "00:00:00:00:00:" + \
651 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
652 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700653 host1Dict = onosCli.getHost( host1 )
654 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700655 host1Id = None
656 host2Id = None
657 if host1Dict and host2Dict:
658 host1Id = host1Dict.get( 'id', None )
659 host2Id = host2Dict.get( 'id', None )
660 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700661 nodeNum = ( i % len( main.activeNodes ) )
662 node = main.activeNodes[nodeNum]
663 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700664 if tmpId:
665 main.log.info( "Added intent with id: " + tmpId )
666 intentIds.append( tmpId )
667 else:
668 main.log.error( "addHostIntent returned: " +
669 repr( tmpId ) )
670 else:
671 main.log.error( "Error, getHost() failed for h" + str( i ) +
672 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700673 node = main.activeNodes[0]
674 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700675 main.log.warn( "Hosts output: " )
676 try:
677 main.log.warn( json.dumps( json.loads( hosts ),
678 sort_keys=True,
679 indent=4,
680 separators=( ',', ': ' ) ) )
681 except ( ValueError, TypeError ):
682 main.log.warn( repr( hosts ) )
683 hostResult = main.FALSE
684 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
685 onpass="Found a host id for each host",
686 onfail="Error looking up host ids" )
687
688 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700689 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700690 main.log.info( "Submitted intents: " + str( intentIds ) )
691 main.log.info( "Intents in ONOS: " + str( onosIds ) )
692 for intent in intentIds:
693 if intent in onosIds:
694 pass # intent submitted is in onos
695 else:
696 intentAddResult = False
697 if intentAddResult:
698 intentStop = time.time()
699 else:
700 intentStop = None
701 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700702 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700703 intentStates = []
704 installedCheck = True
705 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
706 count = 0
707 try:
708 for intent in json.loads( intents ):
709 state = intent.get( 'state', None )
710 if "INSTALLED" not in state:
711 installedCheck = False
712 intentId = intent.get( 'id', None )
713 intentStates.append( ( intentId, state ) )
714 except ( ValueError, TypeError ):
715 main.log.exception( "Error parsing intents" )
716 # add submitted intents not in the store
717 tmplist = [ i for i, s in intentStates ]
718 missingIntents = False
719 for i in intentIds:
720 if i not in tmplist:
721 intentStates.append( ( i, " - " ) )
722 missingIntents = True
723 intentStates.sort()
724 for i, s in intentStates:
725 count += 1
726 main.log.info( "%-6s%-15s%-15s" %
727 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700728 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700729 try:
730 missing = False
731 if leaders:
732 parsedLeaders = json.loads( leaders )
733 main.log.warn( json.dumps( parsedLeaders,
734 sort_keys=True,
735 indent=4,
736 separators=( ',', ': ' ) ) )
737 # check for all intent partitions
738 topics = []
739 for i in range( 14 ):
740 topics.append( "intent-partition-" + str( i ) )
741 main.log.debug( topics )
742 ONOStopics = [ j['topic'] for j in parsedLeaders ]
743 for topic in topics:
744 if topic not in ONOStopics:
745 main.log.error( "Error: " + topic +
746 " not in leaders" )
747 missing = True
748 else:
749 main.log.error( "leaders() returned None" )
750 except ( ValueError, TypeError ):
751 main.log.exception( "Error parsing leaders" )
752 main.log.error( repr( leaders ) )
753 # Check all nodes
754 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700755 for i in main.activeNodes:
756 response = main.CLIs[i].leaders( jsonFormat=False)
757 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700758 str( response ) )
759
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700760 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700761 try:
762 if partitions :
763 parsedPartitions = json.loads( partitions )
764 main.log.warn( json.dumps( parsedPartitions,
765 sort_keys=True,
766 indent=4,
767 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
769 # TODO check for consistency among nodes
770 else:
771 main.log.error( "partitions() returned None" )
772 except ( ValueError, TypeError ):
773 main.log.exception( "Error parsing partitions" )
774 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700775 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700776 try:
777 if pendingMap :
778 parsedPending = json.loads( pendingMap )
779 main.log.warn( json.dumps( parsedPending,
780 sort_keys=True,
781 indent=4,
782 separators=( ',', ': ' ) ) )
783 # TODO check something here?
784 else:
785 main.log.error( "pendingMap() returned None" )
786 except ( ValueError, TypeError ):
787 main.log.exception( "Error parsing pending map" )
788 main.log.error( repr( pendingMap ) )
789
790 intentAddResult = bool( intentAddResult and not missingIntents and
791 installedCheck )
792 if not intentAddResult:
793 main.log.error( "Error in pushing host intents to ONOS" )
794
795 main.step( "Intent Anti-Entropy dispersion" )
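        # Poll every active node (up to 100 times, one second apart) until all
        # of them report the same set of intent IDs with every intent INSTALLED.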
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700796 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700797 correct = True
798 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700799 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700800 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700801 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700802 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700803 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700804 str( sorted( onosIds ) ) )
805 if sorted( ids ) != sorted( intentIds ):
806 main.log.warn( "Set of intent IDs doesn't match" )
807 correct = False
808 break
809 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700810 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700811 for intent in intents:
812 if intent[ 'state' ] != "INSTALLED":
813 main.log.warn( "Intent " + intent[ 'id' ] +
814 " is " + intent[ 'state' ] )
815 correct = False
816 break
817 if correct:
818 break
819 else:
820 time.sleep(1)
821 if not intentStop:
822 intentStop = time.time()
823 global gossipTime
824 gossipTime = intentStop - intentStart
825 main.log.info( "It took about " + str( gossipTime ) +
826 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700827 gossipPeriod = int( main.params['timers']['gossip'] )
828 maxGossipTime = gossipPeriod * len( main.activeNodes )
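        # Intents should finish dispersing within one gossip period per active
        # node; anything longer suggests anti-entropy is lagging.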
Jon Hall5cf14d52015-07-16 12:15:19 -0700829 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700830 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700831 onpass="ECM anti-entropy for intents worked within " +
832 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700833 onfail="Intent ECM anti-entropy took too long. " +
834 "Expected time:{}, Actual time:{}".format( maxGossipTime,
835 gossipTime ) )
836 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700837 intentAddResult = True
838
839 if not intentAddResult or "key" in pendingMap:
840 import time
841 installedCheck = True
842 main.log.info( "Sleeping 60 seconds to see if intents are found" )
843 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700844 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700845 main.log.info( "Submitted intents: " + str( intentIds ) )
846 main.log.info( "Intents in ONOS: " + str( onosIds ) )
847 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700848 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700849 intentStates = []
850 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
851 count = 0
852 try:
853 for intent in json.loads( intents ):
854 # Iter through intents of a node
855 state = intent.get( 'state', None )
856 if "INSTALLED" not in state:
857 installedCheck = False
858 intentId = intent.get( 'id', None )
859 intentStates.append( ( intentId, state ) )
860 except ( ValueError, TypeError ):
861 main.log.exception( "Error parsing intents" )
862 # add submitted intents not in the store
863 tmplist = [ i for i, s in intentStates ]
864 for i in intentIds:
865 if i not in tmplist:
866 intentStates.append( ( i, " - " ) )
867 intentStates.sort()
868 for i, s in intentStates:
869 count += 1
870 main.log.info( "%-6s%-15s%-15s" %
871 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700872 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700873 try:
874 missing = False
875 if leaders:
876 parsedLeaders = json.loads( leaders )
877 main.log.warn( json.dumps( parsedLeaders,
878 sort_keys=True,
879 indent=4,
880 separators=( ',', ': ' ) ) )
881 # check for all intent partitions
882 # check for election
883 topics = []
884 for i in range( 14 ):
885 topics.append( "intent-partition-" + str( i ) )
886 # FIXME: this should only be after we start the app
887 topics.append( "org.onosproject.election" )
888 main.log.debug( topics )
889 ONOStopics = [ j['topic'] for j in parsedLeaders ]
890 for topic in topics:
891 if topic not in ONOStopics:
892 main.log.error( "Error: " + topic +
893 " not in leaders" )
894 missing = True
895 else:
896 main.log.error( "leaders() returned None" )
897 except ( ValueError, TypeError ):
898 main.log.exception( "Error parsing leaders" )
899 main.log.error( repr( leaders ) )
900 # Check all nodes
901 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700902 for i in main.activeNodes:
903 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700904 response = node.leaders( jsonFormat=False)
905 main.log.warn( str( node.name ) + " leaders output: \n" +
906 str( response ) )
907
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700908 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700909 try:
910 if partitions :
911 parsedPartitions = json.loads( partitions )
912 main.log.warn( json.dumps( parsedPartitions,
913 sort_keys=True,
914 indent=4,
915 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
917 # TODO check for consistency among nodes
918 else:
919 main.log.error( "partitions() returned None" )
920 except ( ValueError, TypeError ):
921 main.log.exception( "Error parsing partitions" )
922 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700923 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700924 try:
925 if pendingMap :
926 parsedPending = json.loads( pendingMap )
927 main.log.warn( json.dumps( parsedPending,
928 sort_keys=True,
929 indent=4,
930 separators=( ',', ': ' ) ) )
931 # TODO check something here?
932 else:
933 main.log.error( "pendingMap() returned None" )
934 except ( ValueError, TypeError ):
935 main.log.exception( "Error parsing pending map" )
936 main.log.error( repr( pendingMap ) )
937
938 def CASE4( self, main ):
939 """
940 Ping across added host intents
941 """
942 import json
943 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700944 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700945 assert main, "main not defined"
946 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700947 assert main.CLIs, "main.CLIs not defined"
948 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700949 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700950 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700951 "functionality and check the state of " +\
952 "the intent"
953 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700954 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700955 PingResult = main.TRUE
956 for i in range( 8, 18 ):
957 ping = main.Mininet1.pingHost( src="h" + str( i ),
958 target="h" + str( i + 10 ) )
959 PingResult = PingResult and ping
960 if ping == main.FALSE:
961 main.log.warn( "Ping failed between h" + str( i ) +
962 " and h" + str( i + 10 ) )
963 elif ping == main.TRUE:
964 main.log.info( "Ping test passed!" )
965 # Don't set PingResult or you'd override failures
966 if PingResult == main.FALSE:
967 main.log.error(
968 "Intents have not been installed correctly, pings failed." )
969 # TODO: pretty print
970 main.log.warn( "ONOS1 intents: " )
971 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700972 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700973 main.log.warn( json.dumps( json.loads( tmpIntents ),
974 sort_keys=True,
975 indent=4,
976 separators=( ',', ': ' ) ) )
977 except ( ValueError, TypeError ):
978 main.log.warn( repr( tmpIntents ) )
979 utilities.assert_equals(
980 expect=main.TRUE,
981 actual=PingResult,
982 onpass="Intents have been installed correctly and pings work",
983 onfail="Intents have not been installed correctly, pings failed." )
984
985 main.step( "Check Intent state" )
986 installedCheck = False
987 loopCount = 0
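        # Poll intent states for up to ~40 seconds until every intent reports
        # INSTALLED.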
988 while not installedCheck and loopCount < 40:
989 installedCheck = True
990 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700991 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700992 intentStates = []
993 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
994 count = 0
995 # Iter through intents of a node
996 try:
997 for intent in json.loads( intents ):
998 state = intent.get( 'state', None )
999 if "INSTALLED" not in state:
1000 installedCheck = False
1001 intentId = intent.get( 'id', None )
1002 intentStates.append( ( intentId, state ) )
1003 except ( ValueError, TypeError ):
1004 main.log.exception( "Error parsing intents." )
1005 # Print states
1006 intentStates.sort()
1007 for i, s in intentStates:
1008 count += 1
1009 main.log.info( "%-6s%-15s%-15s" %
1010 ( str( count ), str( i ), str( s ) ) )
1011 if not installedCheck:
1012 time.sleep( 1 )
1013 loopCount += 1
1014 utilities.assert_equals( expect=True, actual=installedCheck,
1015 onpass="Intents are all INSTALLED",
1016 onfail="Intents are not all in " +
1017 "INSTALLED state" )
1018
1019 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001020 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001021 topicCheck = main.TRUE
1022 try:
1023 if leaders:
1024 parsedLeaders = json.loads( leaders )
1025 main.log.warn( json.dumps( parsedLeaders,
1026 sort_keys=True,
1027 indent=4,
1028 separators=( ',', ': ' ) ) )
1029 # check for all intent partitions
1030 # check for election
1031 # TODO: Look at Devices as topics now that it uses this system
1032 topics = []
1033 for i in range( 14 ):
1034 topics.append( "intent-partition-" + str( i ) )
1035 # FIXME: this should only be after we start the app
1036 # FIXME: topics.append( "org.onosproject.election" )
1037 # Print leaders output
1038 main.log.debug( topics )
1039 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1040 for topic in topics:
1041 if topic not in ONOStopics:
1042 main.log.error( "Error: " + topic +
1043 " not in leaders" )
1044 topicCheck = main.FALSE
1045 else:
1046 main.log.error( "leaders() returned None" )
1047 topicCheck = main.FALSE
1048 except ( ValueError, TypeError ):
1049 topicCheck = main.FALSE
1050 main.log.exception( "Error parsing leaders" )
1051 main.log.error( repr( leaders ) )
1052 # TODO: Check for a leader of these topics
1053 # Check all nodes
1054 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001055 for i in main.activeNodes:
1056 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001057 response = node.leaders( jsonFormat=False)
1058 main.log.warn( str( node.name ) + " leaders output: \n" +
1059 str( response ) )
1060
1061 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1062 onpass="intent Partitions is in leaders",
1063 onfail="Some topics were lost " )
1064 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001065 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001066 try:
1067 if partitions :
1068 parsedPartitions = json.loads( partitions )
1069 main.log.warn( json.dumps( parsedPartitions,
1070 sort_keys=True,
1071 indent=4,
1072 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
1074 # TODO check for consistency among nodes
1075 else:
1076 main.log.error( "partitions() returned None" )
1077 except ( ValueError, TypeError ):
1078 main.log.exception( "Error parsing partitions" )
1079 main.log.error( repr( partitions ) )
1080 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001081 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001082 try:
1083 if pendingMap :
1084 parsedPending = json.loads( pendingMap )
1085 main.log.warn( json.dumps( parsedPending,
1086 sort_keys=True,
1087 indent=4,
1088 separators=( ',', ': ' ) ) )
1089 # TODO check something here?
1090 else:
1091 main.log.error( "pendingMap() returned None" )
1092 except ( ValueError, TypeError ):
1093 main.log.exception( "Error parsing pending map" )
1094 main.log.error( repr( pendingMap ) )
1095
1096 if not installedCheck:
1097 main.log.info( "Waiting 60 seconds to see if the state of " +
1098 "intents change" )
1099 time.sleep( 60 )
1100 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001101 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001102 intentStates = []
1103 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1104 count = 0
1105 # Iter through intents of a node
1106 try:
1107 for intent in json.loads( intents ):
1108 state = intent.get( 'state', None )
1109 if "INSTALLED" not in state:
1110 installedCheck = False
1111 intentId = intent.get( 'id', None )
1112 intentStates.append( ( intentId, state ) )
1113 except ( ValueError, TypeError ):
1114 main.log.exception( "Error parsing intents." )
1115 intentStates.sort()
1116 for i, s in intentStates:
1117 count += 1
1118 main.log.info( "%-6s%-15s%-15s" %
1119 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001120 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001121 try:
1122 missing = False
1123 if leaders:
1124 parsedLeaders = json.loads( leaders )
1125 main.log.warn( json.dumps( parsedLeaders,
1126 sort_keys=True,
1127 indent=4,
1128 separators=( ',', ': ' ) ) )
1129 # check for all intent partitions
1130 # check for election
1131 topics = []
1132 for i in range( 14 ):
1133 topics.append( "intent-partition-" + str( i ) )
1134 # FIXME: this should only be after we start the app
1135 topics.append( "org.onosproject.election" )
1136 main.log.debug( topics )
1137 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1138 for topic in topics:
1139 if topic not in ONOStopics:
1140 main.log.error( "Error: " + topic +
1141 " not in leaders" )
1142 missing = True
1143 else:
1144 main.log.error( "leaders() returned None" )
1145 except ( ValueError, TypeError ):
1146 main.log.exception( "Error parsing leaders" )
1147 main.log.error( repr( leaders ) )
1148 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001149 for i in main.activeNodes:
1150 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001151 response = node.leaders( jsonFormat=False)
1152 main.log.warn( str( node.name ) + " leaders output: \n" +
1153 str( response ) )
1154
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001155 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001156 try:
1157 if partitions :
1158 parsedPartitions = json.loads( partitions )
1159 main.log.warn( json.dumps( parsedPartitions,
1160 sort_keys=True,
1161 indent=4,
1162 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
1164 # TODO check for consistency among nodes
1165 else:
1166 main.log.error( "partitions() returned None" )
1167 except ( ValueError, TypeError ):
1168 main.log.exception( "Error parsing partitions" )
1169 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001170 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001171 try:
1172 if pendingMap :
1173 parsedPending = json.loads( pendingMap )
1174 main.log.warn( json.dumps( parsedPending,
1175 sort_keys=True,
1176 indent=4,
1177 separators=( ',', ': ' ) ) )
1178 # TODO check something here?
1179 else:
1180 main.log.error( "pendingMap() returned None" )
1181 except ( ValueError, TypeError ):
1182 main.log.exception( "Error parsing pending map" )
1183 main.log.error( repr( pendingMap ) )
1184 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001185 node = main.activeNodes[0]
1186 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001187 main.step( "Wait a minute then ping again" )
1188 # the wait is above
1189 PingResult = main.TRUE
1190 for i in range( 8, 18 ):
1191 ping = main.Mininet1.pingHost( src="h" + str( i ),
1192 target="h" + str( i + 10 ) )
1193 PingResult = PingResult and ping
1194 if ping == main.FALSE:
1195 main.log.warn( "Ping failed between h" + str( i ) +
1196 " and h" + str( i + 10 ) )
1197 elif ping == main.TRUE:
1198 main.log.info( "Ping test passed!" )
1199 # Don't set PingResult or you'd override failures
1200 if PingResult == main.FALSE:
1201 main.log.error(
1202 "Intents have not been installed correctly, pings failed." )
1203 # TODO: pretty print
1204 main.log.warn( "ONOS1 intents: " )
1205 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001206 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001207 main.log.warn( json.dumps( json.loads( tmpIntents ),
1208 sort_keys=True,
1209 indent=4,
1210 separators=( ',', ': ' ) ) )
1211 except ( ValueError, TypeError ):
1212 main.log.warn( repr( tmpIntents ) )
1213 utilities.assert_equals(
1214 expect=main.TRUE,
1215 actual=PingResult,
1216 onpass="Intents have been installed correctly and pings work",
1217 onfail="Intents have not been installed correctly, pings failed." )
1218
1219 def CASE5( self, main ):
1220 """
1221 Reading state of ONOS
1222 """
1223 import json
1224 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001225 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001226 assert main, "main not defined"
1227 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001228 assert main.CLIs, "main.CLIs not defined"
1229 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001230
1231 main.case( "Setting up and gathering data for current state" )
1232 # The general idea for this test case is to pull the state of
1233 # ( intents,flows, topology,... ) from each ONOS node
1234 # We can then compare them with each other and also with past states
1235
1236 main.step( "Check that each switch has a master" )
1237 global mastershipState
1238 mastershipState = '[]'
1239
1240 # Assert that each device has a master
1241 rolesNotNull = main.TRUE
1242 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001243 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001244 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001245 name="rolesNotNull-" + str( i ),
1246 args=[] )
1247 threads.append( t )
1248 t.start()
1249
1250 for t in threads:
1251 t.join()
1252 rolesNotNull = rolesNotNull and t.result
1253 utilities.assert_equals(
1254 expect=main.TRUE,
1255 actual=rolesNotNull,
1256 onpass="Each device has a master",
1257 onfail="Some devices don't have a master assigned" )
1258
1259 main.step( "Get the Mastership of each switch from each controller" )
1260 ONOSMastership = []
1261 mastershipCheck = main.FALSE
1262 consistentMastership = True
1263 rolesResults = True
1264 threads = []
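        # Collect the full mastership (roles) view from every active node in
        # parallel; the views are compared for consistency below.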
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001265 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001266 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001267 name="roles-" + str( i ),
1268 args=[] )
1269 threads.append( t )
1270 t.start()
1271
1272 for t in threads:
1273 t.join()
1274 ONOSMastership.append( t.result )
1275
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001276 for i in range( len( ONOSMastership ) ):
1277 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001278 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001279 main.log.error( "Error in getting ONOS" + node + " roles" )
1280 main.log.warn( "ONOS" + node + " mastership response: " +
1281 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001282 rolesResults = False
1283 utilities.assert_equals(
1284 expect=True,
1285 actual=rolesResults,
1286 onpass="No error in reading roles output",
1287 onfail="Error in reading roles from ONOS" )
1288
1289 main.step( "Check for consistency in roles from each controller" )
1290 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1291 main.log.info(
1292 "Switch roles are consistent across all ONOS nodes" )
1293 else:
1294 consistentMastership = False
1295 utilities.assert_equals(
1296 expect=True,
1297 actual=consistentMastership,
1298 onpass="Switch roles are consistent across all ONOS nodes",
1299 onfail="ONOS nodes have different views of switch roles" )
1300
1301 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001302 for i in range( len( main.activeNodes ) ):
1303 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001304 try:
1305 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001306 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001307 json.dumps(
1308 json.loads( ONOSMastership[ i ] ),
1309 sort_keys=True,
1310 indent=4,
1311 separators=( ',', ': ' ) ) )
1312 except ( ValueError, TypeError ):
1313 main.log.warn( repr( ONOSMastership[ i ] ) )
1314 elif rolesResults and consistentMastership:
1315 mastershipCheck = main.TRUE
1316 mastershipState = ONOSMastership[ 0 ]
1317
1318 main.step( "Get the intents from each controller" )
1319 global intentState
1320 intentState = []
1321 ONOSIntents = []
1322 intentCheck = main.FALSE
1323 consistentIntents = True
1324 intentsResults = True
1325 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001326 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001327 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001328 name="intents-" + str( i ),
1329 args=[],
1330 kwargs={ 'jsonFormat': True } )
1331 threads.append( t )
1332 t.start()
1333
1334 for t in threads:
1335 t.join()
1336 ONOSIntents.append( t.result )
1337
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001338 for i in range( len( ONOSIntents ) ):
1339 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001340 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001341 main.log.error( "Error in getting ONOS" + node + " intents" )
1342 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001343 repr( ONOSIntents[ i ] ) )
1344 intentsResults = False
1345 utilities.assert_equals(
1346 expect=True,
1347 actual=intentsResults,
1348 onpass="No error in reading intents output",
1349 onfail="Error in reading intents from ONOS" )
1350
1351 main.step( "Check for consistency in Intents from each controller" )
1352 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1353 main.log.info( "Intents are consistent across all ONOS " +
1354 "nodes" )
1355 else:
1356 consistentIntents = False
1357 main.log.error( "Intents not consistent" )
1358 utilities.assert_equals(
1359 expect=True,
1360 actual=consistentIntents,
1361 onpass="Intents are consistent across all ONOS nodes",
1362 onfail="ONOS nodes have different views of intents" )
1363
1364 if intentsResults:
1365 # Try to make it easy to figure out what is happening
1366 #
1367 # Intent ONOS1 ONOS2 ...
1368 # 0x01 INSTALLED INSTALLING
1369 # ... ... ...
1370 # ... ... ...
1371 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001372 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001373 title += " " * 10 + "ONOS" + str( n + 1 )
1374 main.log.warn( title )
1375 # get all intent keys in the cluster
1376 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001377 try:
1378 # Get the set of all intent keys
Jon Hall5cf14d52015-07-16 12:15:19 -07001379 for nodeStr in ONOSIntents:
1380 node = json.loads( nodeStr )
1381 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001382 keys.append( intent.get( 'id' ) )
1383 keys = set( keys )
1384 # For each intent key, print the state on each node
1385 for key in keys:
1386 row = "%-13s" % key
1387 for nodeStr in ONOSIntents:
1388 node = json.loads( nodeStr )
1389 for intent in node:
1390 if intent.get( 'id', "Error" ) == key:
1391 row += "%-15s" % intent.get( 'state' )
1392 main.log.warn( row )
1393 # End of intent state table
1394 except ValueError as e:
1395 main.log.exception( e )
1396 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001397
1398 if intentsResults and not consistentIntents:
1399 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001400 n = str( main.activeNodes[-1] + 1 )
1401 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001402 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1403 sort_keys=True,
1404 indent=4,
1405 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001406 for i in range( len( ONOSIntents ) ):
1407 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001408 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001409 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001410 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1411 sort_keys=True,
1412 indent=4,
1413 separators=( ',', ': ' ) ) )
1414 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001415 main.log.debug( "ONOS" + node + " intents match ONOS" +
1416 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001417 elif intentsResults and consistentIntents:
1418 intentCheck = main.TRUE
1419 intentState = ONOSIntents[ 0 ]
1420
1421 main.step( "Get the flows from each controller" )
1422 global flowState
1423 flowState = []
1424 ONOSFlows = []
1425 ONOSFlowsJson = []
1426 flowCheck = main.FALSE
1427 consistentFlows = True
1428 flowsResults = True
1429 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001430 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001431 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 name="flows-" + str( i ),
1433 args=[],
1434 kwargs={ 'jsonFormat': True } )
1435 threads.append( t )
1436 t.start()
1437
1438 # NOTE: Flows command can take some time to run
1439 time.sleep(30)
1440 for t in threads:
1441 t.join()
1442 result = t.result
1443 ONOSFlows.append( result )
1444
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001445 for i in range( len( ONOSFlows ) ):
1446 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001447 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1448 main.log.error( "Error in getting ONOS" + num + " flows" )
1449 main.log.warn( "ONOS" + num + " flows response: " +
1450 repr( ONOSFlows[ i ] ) )
1451 flowsResults = False
1452 ONOSFlowsJson.append( None )
1453 else:
1454 try:
1455 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1456 except ( ValueError, TypeError ):
1457 # FIXME: change this to log.error?
1458 main.log.exception( "Error in parsing ONOS" + num +
1459 " response as json." )
1460 main.log.error( repr( ONOSFlows[ i ] ) )
1461 ONOSFlowsJson.append( None )
1462 flowsResults = False
1463 utilities.assert_equals(
1464 expect=True,
1465 actual=flowsResults,
1466 onpass="No error in reading flows output",
1467 onfail="Error in reading flows from ONOS" )
1468
1469 main.step( "Check for consistency in Flows from each controller" )
1470 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1471 if all( tmp ):
1472 main.log.info( "Flow count is consistent across all ONOS nodes" )
1473 else:
1474 consistentFlows = False
1475 utilities.assert_equals(
1476 expect=True,
1477 actual=consistentFlows,
1478 onpass="The flow count is consistent across all ONOS nodes",
1479 onfail="ONOS nodes have different flow counts" )
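        # NOTE: ONOSFlowsJson can contain None entries when a node returned an
        # error (see above), so the len() comparison in the consistency check
        # would raise a TypeError on a None entry. A more defensive sketch of
        # the same count comparison, shown for illustration only and not
        # called by this test:
        def flowCountsMatch( flowsJsonList ):
            """Return True only if every node returned flows of equal count."""
            counts = [ len( f ) for f in flowsJsonList if f is not None ]
            if len( counts ) != len( flowsJsonList ):
                return False  # at least one node returned no parsable flows
            return all( c == counts[ 0 ] for c in counts )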
1480
1481 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001482 for i in range( len( ONOSFlows ) ):
1483 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001484 try:
1485 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001486 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001487 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1488 indent=4, separators=( ',', ': ' ) ) )
1489 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001490 main.log.warn( "ONOS" + node + " flows: " +
1491 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001492 elif flowsResults and consistentFlows:
1493 flowCheck = main.TRUE
1494 flowState = ONOSFlows[ 0 ]
1495
1496 main.step( "Get the OF Table entries" )
1497 global flows
1498 flows = []
1499 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001500 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001501 if flowCheck == main.FALSE:
1502 for table in flows:
1503 main.log.warn( table )
1504 # TODO: Compare switch flow tables with ONOS flow tables
1505
1506 main.step( "Start continuous pings" )
1507 main.Mininet2.pingLong(
1508 src=main.params[ 'PING' ][ 'source1' ],
1509 target=main.params[ 'PING' ][ 'target1' ],
1510 pingTime=500 )
1511 main.Mininet2.pingLong(
1512 src=main.params[ 'PING' ][ 'source2' ],
1513 target=main.params[ 'PING' ][ 'target2' ],
1514 pingTime=500 )
1515 main.Mininet2.pingLong(
1516 src=main.params[ 'PING' ][ 'source3' ],
1517 target=main.params[ 'PING' ][ 'target3' ],
1518 pingTime=500 )
1519 main.Mininet2.pingLong(
1520 src=main.params[ 'PING' ][ 'source4' ],
1521 target=main.params[ 'PING' ][ 'target4' ],
1522 pingTime=500 )
1523 main.Mininet2.pingLong(
1524 src=main.params[ 'PING' ][ 'source5' ],
1525 target=main.params[ 'PING' ][ 'target5' ],
1526 pingTime=500 )
1527 main.Mininet2.pingLong(
1528 src=main.params[ 'PING' ][ 'source6' ],
1529 target=main.params[ 'PING' ][ 'target6' ],
1530 pingTime=500 )
1531 main.Mininet2.pingLong(
1532 src=main.params[ 'PING' ][ 'source7' ],
1533 target=main.params[ 'PING' ][ 'target7' ],
1534 pingTime=500 )
1535 main.Mininet2.pingLong(
1536 src=main.params[ 'PING' ][ 'source8' ],
1537 target=main.params[ 'PING' ][ 'target8' ],
1538 pingTime=500 )
1539 main.Mininet2.pingLong(
1540 src=main.params[ 'PING' ][ 'source9' ],
1541 target=main.params[ 'PING' ][ 'target9' ],
1542 pingTime=500 )
1543 main.Mininet2.pingLong(
1544 src=main.params[ 'PING' ][ 'source10' ],
1545 target=main.params[ 'PING' ][ 'target10' ],
1546 pingTime=500 )
1547
1548 main.step( "Collecting topology information from ONOS" )
1549 devices = []
1550 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001551 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001552 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001553 name="devices-" + str( i ),
1554 args=[ ] )
1555 threads.append( t )
1556 t.start()
1557
1558 for t in threads:
1559 t.join()
1560 devices.append( t.result )
1561 hosts = []
1562 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001563 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001564 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001565 name="hosts-" + str( i ),
1566 args=[ ] )
1567 threads.append( t )
1568 t.start()
1569
1570 for t in threads:
1571 t.join()
1572 try:
1573 hosts.append( json.loads( t.result ) )
1574 except ( ValueError, TypeError ):
1575 # FIXME: better handling of this, print which node
1576 # Maybe use thread name?
1577 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001578 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001579 hosts.append( None )
1580
1581 ports = []
1582 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001583 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001584 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001585 name="ports-" + str( i ),
1586 args=[ ] )
1587 threads.append( t )
1588 t.start()
1589
1590 for t in threads:
1591 t.join()
1592 ports.append( t.result )
1593 links = []
1594 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001595 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001596 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001597 name="links-" + str( i ),
1598 args=[ ] )
1599 threads.append( t )
1600 t.start()
1601
1602 for t in threads:
1603 t.join()
1604 links.append( t.result )
1605 clusters = []
1606 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001607 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001608 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001609 name="clusters-" + str( i ),
1610 args=[ ] )
1611 threads.append( t )
1612 t.start()
1613
1614 for t in threads:
1615 t.join()
1616 clusters.append( t.result )
1617 # Compare json objects for hosts and dataplane clusters
1618
1619 # hosts
1620 main.step( "Host view is consistent across ONOS nodes" )
1621 consistentHostsResult = main.TRUE
1622 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001623 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001624 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001625 if hosts[ controller ] == hosts[ 0 ]:
1626 continue
1627 else: # hosts not consistent
1628 main.log.error( "hosts from ONOS" +
1629 controllerStr +
1630 " is inconsistent with ONOS1" )
1631 main.log.warn( repr( hosts[ controller ] ) )
1632 consistentHostsResult = main.FALSE
1633
1634 else:
1635 main.log.error( "Error in getting ONOS hosts from ONOS" +
1636 controllerStr )
1637 consistentHostsResult = main.FALSE
1638 main.log.warn( "ONOS" + controllerStr +
1639 " hosts response: " +
1640 repr( hosts[ controller ] ) )
1641 utilities.assert_equals(
1642 expect=main.TRUE,
1643 actual=consistentHostsResult,
1644 onpass="Hosts view is consistent across all ONOS nodes",
1645 onfail="ONOS nodes have different views of hosts" )
1646
1647 main.step( "Each host has an IP address" )
1648 ipResult = main.TRUE
1649 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001650 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001651 if hosts[ controller ]:
1652 for host in hosts[ controller ]:
1653 if not host.get( 'ipAddresses', [ ] ):
1654 main.log.error( "Error with host ips on controller" +
1655 controllerStr + ": " + str( host ) )
1656 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001657 utilities.assert_equals(
1658 expect=main.TRUE,
1659 actual=ipResult,
1660             onpass="Every host has at least one IP address",
1661             onfail="At least one host is missing an IP address" )
1662
1663 # Strongly connected clusters of devices
1664 main.step( "Cluster view is consistent across ONOS nodes" )
1665 consistentClustersResult = main.TRUE
1666 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001667 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001668 if "Error" not in clusters[ controller ]:
1669 if clusters[ controller ] == clusters[ 0 ]:
1670 continue
1671 else: # clusters not consistent
1672 main.log.error( "clusters from ONOS" + controllerStr +
1673 " is inconsistent with ONOS1" )
1674 consistentClustersResult = main.FALSE
1675
1676 else:
1677 main.log.error( "Error in getting dataplane clusters " +
1678 "from ONOS" + controllerStr )
1679 consistentClustersResult = main.FALSE
1680 main.log.warn( "ONOS" + controllerStr +
1681 " clusters response: " +
1682 repr( clusters[ controller ] ) )
1683 utilities.assert_equals(
1684 expect=main.TRUE,
1685 actual=consistentClustersResult,
1686 onpass="Clusters view is consistent across all ONOS nodes",
1687 onfail="ONOS nodes have different views of clusters" )
1688 # there should always only be one cluster
1689 main.step( "Cluster view correct across ONOS nodes" )
1690 try:
1691 numClusters = len( json.loads( clusters[ 0 ] ) )
1692 except ( ValueError, TypeError ):
1693 main.log.exception( "Error parsing clusters[0]: " +
1694 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001695 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001696 clusterResults = main.FALSE
1697 if numClusters == 1:
1698 clusterResults = main.TRUE
1699 utilities.assert_equals(
1700 expect=1,
1701 actual=numClusters,
1702 onpass="ONOS shows 1 SCC",
1703 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1704
1705 main.step( "Comparing ONOS topology to MN" )
1706 devicesResults = main.TRUE
1707 linksResults = main.TRUE
1708 hostsResults = main.TRUE
1709 mnSwitches = main.Mininet1.getSwitches()
1710 mnLinks = main.Mininet1.getLinks()
1711 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001712 for controller in main.activeNodes:
1713 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001714 if devices[ controller ] and ports[ controller ] and\
1715 "Error" not in devices[ controller ] and\
1716 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001717 currentDevicesResult = main.Mininet1.compareSwitches(
1718 mnSwitches,
1719 json.loads( devices[ controller ] ),
1720 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001721 else:
1722 currentDevicesResult = main.FALSE
1723 utilities.assert_equals( expect=main.TRUE,
1724 actual=currentDevicesResult,
1725 onpass="ONOS" + controllerStr +
1726 " Switches view is correct",
1727 onfail="ONOS" + controllerStr +
1728 " Switches view is incorrect" )
1729 if links[ controller ] and "Error" not in links[ controller ]:
1730 currentLinksResult = main.Mininet1.compareLinks(
1731 mnSwitches, mnLinks,
1732 json.loads( links[ controller ] ) )
1733 else:
1734 currentLinksResult = main.FALSE
1735 utilities.assert_equals( expect=main.TRUE,
1736 actual=currentLinksResult,
1737 onpass="ONOS" + controllerStr +
1738 " links view is correct",
1739 onfail="ONOS" + controllerStr +
1740 " links view is incorrect" )
1741
Jon Hall657cdf62015-12-17 14:40:51 -08001742 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001743 currentHostsResult = main.Mininet1.compareHosts(
1744 mnHosts,
1745 hosts[ controller ] )
1746 else:
1747 currentHostsResult = main.FALSE
1748 utilities.assert_equals( expect=main.TRUE,
1749 actual=currentHostsResult,
1750 onpass="ONOS" + controllerStr +
1751 " hosts exist in Mininet",
1752 onfail="ONOS" + controllerStr +
1753 " hosts don't match Mininet" )
1754
1755 devicesResults = devicesResults and currentDevicesResult
1756 linksResults = linksResults and currentLinksResult
1757 hostsResults = hostsResults and currentHostsResult
1758
1759 main.step( "Device information is correct" )
1760 utilities.assert_equals(
1761 expect=main.TRUE,
1762 actual=devicesResults,
1763 onpass="Device information is correct",
1764 onfail="Device information is incorrect" )
1765
1766 main.step( "Links are correct" )
1767 utilities.assert_equals(
1768 expect=main.TRUE,
1769 actual=linksResults,
1770             onpass="Links are correct",
1771 onfail="Links are incorrect" )
1772
1773 main.step( "Hosts are correct" )
1774 utilities.assert_equals(
1775 expect=main.TRUE,
1776 actual=hostsResults,
1777 onpass="Hosts are correct",
1778 onfail="Hosts are incorrect" )
1779
1780     def CASE61( self, main ):
1781         """
1782         The failure-inducing case: kill a minority of the ONOS nodes.
1783         """
Jon Halle1a3b752015-07-22 13:02:46 -07001784 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001785 assert main, "main not defined"
1786 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001787 assert main.CLIs, "main.CLIs not defined"
1788 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001789 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001790
1791 main.step( "Checking ONOS Logs for errors" )
1792 for node in main.nodes:
1793 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1794 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1795
1796         n = len( main.nodes )  # Number of nodes
1797 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1798 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1799 if n > 3:
1800 main.kill.append( p - 1 )
1801 # NOTE: This only works for cluster sizes of 3,5, or 7.
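        # With Python 2 integer division, the selection above works out to:
        #   n = 3:  p = ( 4 / 2 ) + 1 = 3  ->  main.kill = [ 0 ]      ( 1 of 3 )
        #   n = 5:  p = ( 6 / 2 ) + 1 = 4  ->  main.kill = [ 0, 3 ]   ( 2 of 5 )
        #   n = 7:  p = ( 8 / 2 ) + 1 = 5  ->  main.kill = [ 0, 4 ]   ( 2 of 7 )
        # so a strict minority of the cluster is killed and the remaining
        # nodes still form a majority.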
1802
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001803 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001804 killResults = main.TRUE
1805 for i in main.kill:
1806 killResults = killResults and\
1807 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001808 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001809 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001810 onpass="ONOS nodes killed successfully",
1811 onfail="ONOS nodes NOT successfully killed" )
1812
1813 def CASE62( self, main ):
1814 """
1815         Bring the stopped ONOS nodes back up
1816 """
1817 import time
1818 assert main.numCtrls, "main.numCtrls not defined"
1819 assert main, "main not defined"
1820 assert utilities.assert_equals, "utilities.assert_equals not defined"
1821 assert main.CLIs, "main.CLIs not defined"
1822 assert main.nodes, "main.nodes not defined"
1823 assert main.kill, "main.kill not defined"
1824 main.case( "Restart minority of ONOS nodes" )
1825
1826 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1827 startResults = main.TRUE
1828 restartTime = time.time()
1829 for i in main.kill:
1830 startResults = startResults and\
1831 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1832 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1833 onpass="ONOS nodes started successfully",
1834 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001835
1836 main.step( "Checking if ONOS is up yet" )
1837 count = 0
1838 onosIsupResult = main.FALSE
1839 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001840 onosIsupResult = main.TRUE
1841 for i in main.kill:
1842 onosIsupResult = onosIsupResult and\
1843 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001844 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001845 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1846 onpass="ONOS restarted successfully",
1847 onfail="ONOS restart NOT successful" )
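        # NOTE: the loop above polls isup() up to 10 times with no pause
        # between attempts. If a delay between polls is ever needed, a small
        # illustrative sketch (not called by this test) could look like:
        def waitForNodes( ipList, attempts=10, delay=5 ):
            """Poll ONOSbench.isup for each ip, sleeping between rounds."""
            for _ in range( attempts ):
                if all( main.ONOSbench.isup( ip ) for ip in ipList ):
                    return main.TRUE
                time.sleep( delay )
            return main.FALSE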
1848
1849         main.step( "Restarting ONOS CLI sessions" )
Jon Hall3b489db2015-10-05 14:38:37 -07001850 cliResults = main.TRUE
1851 for i in main.kill:
1852 cliResults = cliResults and\
1853 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001854 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001855 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1856 onpass="ONOS cli restarted",
1857 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001858 main.activeNodes.sort()
1859 try:
1860 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1861 "List of active nodes has duplicates, this likely indicates something was run out of order"
1862 except AssertionError:
1863 main.log.exception( "" )
1864 main.cleanup()
1865 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001866
1867         # Grab the time of restart so we can check how long the gossip
1868 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001869 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001870 main.log.debug( "Restart time: " + str( main.restartTime ) )
1871         # TODO: Make this configurable. Also, we are breaking the above timer
1872 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001873 node = main.activeNodes[0]
1874 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1875 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1876 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001877
Jon Halla440e872016-03-31 15:15:50 -07001878 main.step( "Rerun for election on the node(s) that were killed" )
1879 runResults = main.TRUE
1880 for i in main.kill:
1881 runResults = runResults and\
1882 main.CLIs[i].electionTestRun()
1883 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1884 onpass="ONOS nodes reran for election topic",
1885                                  onfail="Error rerunning for election" )
1886
1887     def CASE7( self, main ):
1888 """
1889 Check state after ONOS failure
1890 """
1891 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001892 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001893 assert main, "main not defined"
1894 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001895 assert main.CLIs, "main.CLIs not defined"
1896 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001897 try:
1898 main.kill
1899 except AttributeError:
1900 main.kill = []
1901
Jon Hall5cf14d52015-07-16 12:15:19 -07001902 main.case( "Running ONOS Constant State Tests" )
1903
1904 main.step( "Check that each switch has a master" )
1905 # Assert that each device has a master
1906 rolesNotNull = main.TRUE
1907 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001908 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001909 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001910 name="rolesNotNull-" + str( i ),
1911 args=[ ] )
1912 threads.append( t )
1913 t.start()
1914
1915 for t in threads:
1916 t.join()
1917 rolesNotNull = rolesNotNull and t.result
1918 utilities.assert_equals(
1919 expect=main.TRUE,
1920 actual=rolesNotNull,
1921 onpass="Each device has a master",
1922 onfail="Some devices don't have a master assigned" )
1923
1924 main.step( "Read device roles from ONOS" )
1925 ONOSMastership = []
Jon Halla440e872016-03-31 15:15:50 -07001926 mastershipCheck = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001927 consistentMastership = True
1928 rolesResults = True
1929 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001930 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001931 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001932 name="roles-" + str( i ),
1933 args=[] )
1934 threads.append( t )
1935 t.start()
1936
1937 for t in threads:
1938 t.join()
1939 ONOSMastership.append( t.result )
1940
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001941 for i in range( len( ONOSMastership ) ):
1942 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001943 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001944 main.log.error( "Error in getting ONOS" + node + " roles" )
1945 main.log.warn( "ONOS" + node + " mastership response: " +
1946 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001947 rolesResults = False
1948 utilities.assert_equals(
1949 expect=True,
1950 actual=rolesResults,
1951 onpass="No error in reading roles output",
1952 onfail="Error in reading roles from ONOS" )
1953
1954 main.step( "Check for consistency in roles from each controller" )
1955 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1956 main.log.info(
1957 "Switch roles are consistent across all ONOS nodes" )
1958 else:
1959 consistentMastership = False
1960 utilities.assert_equals(
1961 expect=True,
1962 actual=consistentMastership,
1963 onpass="Switch roles are consistent across all ONOS nodes",
1964 onfail="ONOS nodes have different views of switch roles" )
1965
1966 if rolesResults and not consistentMastership:
1967             for i in range( len( ONOSMastership ) ):
1968                 node = str( main.activeNodes[i] + 1 )
1969                 main.log.warn( "ONOS" + node + " roles: " +
1970 json.dumps( json.loads( ONOSMastership[ i ] ),
1971 sort_keys=True,
1972 indent=4,
1973 separators=( ',', ': ' ) ) )
Jon Halla440e872016-03-31 15:15:50 -07001974 elif rolesResults and consistentMastership:
1975 mastershipCheck = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07001976
1977 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07001978
1979 main.step( "Get the intents and compare across all nodes" )
1980 ONOSIntents = []
1981 intentCheck = main.FALSE
1982 consistentIntents = True
1983 intentsResults = True
1984 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001985 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001986 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001987 name="intents-" + str( i ),
1988 args=[],
1989 kwargs={ 'jsonFormat': True } )
1990 threads.append( t )
1991 t.start()
1992
1993 for t in threads:
1994 t.join()
1995 ONOSIntents.append( t.result )
1996
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001997 for i in range( len( ONOSIntents) ):
1998 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001999 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002000 main.log.error( "Error in getting ONOS" + node + " intents" )
2001 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07002002 repr( ONOSIntents[ i ] ) )
2003 intentsResults = False
2004 utilities.assert_equals(
2005 expect=True,
2006 actual=intentsResults,
2007 onpass="No error in reading intents output",
2008 onfail="Error in reading intents from ONOS" )
2009
2010 main.step( "Check for consistency in Intents from each controller" )
2011 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2012 main.log.info( "Intents are consistent across all ONOS " +
2013 "nodes" )
2014 else:
2015 consistentIntents = False
2016
2017 # Try to make it easy to figure out what is happening
2018 #
2019 # Intent ONOS1 ONOS2 ...
2020 # 0x01 INSTALLED INSTALLING
2021 # ... ... ...
2022 # ... ... ...
2023 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002024 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07002025 title += " " * 10 + "ONOS" + str( n + 1 )
2026 main.log.warn( title )
2027 # get all intent keys in the cluster
2028 keys = []
2029 for nodeStr in ONOSIntents:
2030 node = json.loads( nodeStr )
2031 for intent in node:
2032 keys.append( intent.get( 'id' ) )
2033 keys = set( keys )
2034 for key in keys:
2035 row = "%-13s" % key
2036 for nodeStr in ONOSIntents:
2037 node = json.loads( nodeStr )
2038 for intent in node:
2039 if intent.get( 'id' ) == key:
2040 row += "%-15s" % intent.get( 'state' )
2041 main.log.warn( row )
2042 # End table view
2043
2044 utilities.assert_equals(
2045 expect=True,
2046 actual=consistentIntents,
2047 onpass="Intents are consistent across all ONOS nodes",
2048 onfail="ONOS nodes have different views of intents" )
2049 intentStates = []
2050 for node in ONOSIntents: # Iter through ONOS nodes
2051 nodeStates = []
2052 # Iter through intents of a node
2053 try:
2054 for intent in json.loads( node ):
2055 nodeStates.append( intent[ 'state' ] )
2056 except ( ValueError, TypeError ):
2057 main.log.exception( "Error in parsing intents" )
2058 main.log.error( repr( node ) )
2059 intentStates.append( nodeStates )
2060 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2061 main.log.info( dict( out ) )
2062
2063 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002064 for i in range( len( main.activeNodes ) ):
2065 node = str( main.activeNodes[i] + 1 )
2066 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07002067 main.log.warn( json.dumps(
2068 json.loads( ONOSIntents[ i ] ),
2069 sort_keys=True,
2070 indent=4,
2071 separators=( ',', ': ' ) ) )
2072 elif intentsResults and consistentIntents:
2073 intentCheck = main.TRUE
2074
2075 # NOTE: Store has no durability, so intents are lost across system
2076 # restarts
2077 main.step( "Compare current intents with intents before the failure" )
2078 # NOTE: this requires case 5 to pass for intentState to be set.
2079 # maybe we should stop the test if that fails?
2080 sameIntents = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07002081 try:
2082 intentState
2083 except NameError:
2084 main.log.warn( "No previous intent state was saved" )
2085 else:
2086 if intentState and intentState == ONOSIntents[ 0 ]:
2087 sameIntents = main.TRUE
2088 main.log.info( "Intents are consistent with before failure" )
2089 # TODO: possibly the states have changed? we may need to figure out
2090 # what the acceptable states are
2091 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2092 sameIntents = main.TRUE
2093 try:
2094 before = json.loads( intentState )
2095 after = json.loads( ONOSIntents[ 0 ] )
2096 for intent in before:
2097 if intent not in after:
2098 sameIntents = main.FALSE
2099 main.log.debug( "Intent is not currently in ONOS " +
2100 "(at least in the same form):" )
2101 main.log.debug( json.dumps( intent ) )
2102 except ( ValueError, TypeError ):
2103 main.log.exception( "Exception printing intents" )
2104 main.log.debug( repr( ONOSIntents[0] ) )
2105 main.log.debug( repr( intentState ) )
2106 if sameIntents == main.FALSE:
2107 try:
2108 main.log.debug( "ONOS intents before: " )
2109 main.log.debug( json.dumps( json.loads( intentState ),
2110 sort_keys=True, indent=4,
2111 separators=( ',', ': ' ) ) )
2112 main.log.debug( "Current ONOS intents: " )
2113 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2114 sort_keys=True, indent=4,
2115 separators=( ',', ': ' ) ) )
2116 except ( ValueError, TypeError ):
2117 main.log.exception( "Exception printing intents" )
2118 main.log.debug( repr( ONOSIntents[0] ) )
2119 main.log.debug( repr( intentState ) )
2120 utilities.assert_equals(
2121 expect=main.TRUE,
2122 actual=sameIntents,
2123 onpass="Intents are consistent with before failure",
2124 onfail="The Intents changed during failure" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002125 intentCheck = intentCheck and sameIntents
2126
2127 main.step( "Get the OF Table entries and compare to before " +
2128 "component failure" )
2129 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002130 for i in range( 28 ):
2131 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002132 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2133 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
Jon Hall5cf14d52015-07-16 12:15:19 -07002134 if FlowTables == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002135 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002136 utilities.assert_equals(
2137 expect=main.TRUE,
2138 actual=FlowTables,
2139 onpass="No changes were found in the flow tables",
2140 onfail="Changes were found in the flow tables" )
2141
2142 main.Mininet2.pingLongKill()
2143 '''
2144 main.step( "Check the continuous pings to ensure that no packets " +
2145 "were dropped during component failure" )
2146 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2147 main.params[ 'TESTONIP' ] )
2148 LossInPings = main.FALSE
2149 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2150 for i in range( 8, 18 ):
2151 main.log.info(
2152 "Checking for a loss in pings along flow from s" +
2153 str( i ) )
2154 LossInPings = main.Mininet2.checkForLoss(
2155 "/tmp/ping.h" +
2156 str( i ) ) or LossInPings
2157 if LossInPings == main.TRUE:
2158 main.log.info( "Loss in ping detected" )
2159 elif LossInPings == main.ERROR:
2160 main.log.info( "There are multiple mininet process running" )
2161 elif LossInPings == main.FALSE:
2162 main.log.info( "No Loss in the pings" )
2163 main.log.info( "No loss of dataplane connectivity" )
2164 utilities.assert_equals(
2165 expect=main.FALSE,
2166 actual=LossInPings,
2167 onpass="No Loss of connectivity",
2168 onfail="Loss of dataplane connectivity detected" )
2169 '''
2170
2171 main.step( "Leadership Election is still functional" )
2172 # Test of LeadershipElection
2173 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002174
Jon Hall3b489db2015-10-05 14:38:37 -07002175 restarted = []
2176 for i in main.kill:
2177 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002178 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002179
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002180 for i in main.activeNodes:
2181 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002182 leaderN = cli.electionTestLeader()
2183 leaderList.append( leaderN )
2184 if leaderN == main.FALSE:
2185 # error in response
2186 main.log.error( "Something is wrong with " +
2187 "electionTestLeader function, check the" +
2188 " error logs" )
2189 leaderResult = main.FALSE
2190 elif leaderN is None:
2191 main.log.error( cli.name +
2192 " shows no leader for the election-app was" +
2193 " elected after the old one died" )
2194 leaderResult = main.FALSE
2195 elif leaderN in restarted:
2196 main.log.error( cli.name + " shows " + str( leaderN ) +
2197 " as leader for the election-app, but it " +
2198 "was restarted" )
2199 leaderResult = main.FALSE
2200 if len( set( leaderList ) ) != 1:
2201 leaderResult = main.FALSE
2202 main.log.error(
2203 "Inconsistent view of leader for the election test app" )
2204 # TODO: print the list
2205 utilities.assert_equals(
2206 expect=main.TRUE,
2207 actual=leaderResult,
2208 onpass="Leadership election passed",
2209 onfail="Something went wrong with Leadership election" )
2210
2211 def CASE8( self, main ):
2212 """
2213 Compare topo
2214 """
2215 import json
2216 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002217 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002218 assert main, "main not defined"
2219 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002220 assert main.CLIs, "main.CLIs not defined"
2221 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002222
2223 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002224 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002225 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002226 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002227 topoFailMsg = "ONOS topology don't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002228 elapsed = 0
2229 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002230 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002231 startTime = time.time()
2232 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002233 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002234 devicesResults = main.TRUE
2235 linksResults = main.TRUE
2236 hostsResults = main.TRUE
2237 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002238 count += 1
2239 cliStart = time.time()
2240 devices = []
2241 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002242 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002243 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002244 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002245 args=[ main.CLIs[i].devices, [ None ] ],
2246 kwargs= { 'sleep': 5, 'attempts': 5,
2247 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002248 threads.append( t )
2249 t.start()
2250
2251 for t in threads:
2252 t.join()
2253 devices.append( t.result )
2254 hosts = []
2255 ipResult = main.TRUE
2256 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002257 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002258 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002259 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002260 args=[ main.CLIs[i].hosts, [ None ] ],
2261 kwargs= { 'sleep': 5, 'attempts': 5,
2262 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002263 threads.append( t )
2264 t.start()
2265
2266 for t in threads:
2267 t.join()
2268 try:
2269 hosts.append( json.loads( t.result ) )
2270 except ( ValueError, TypeError ):
2271 main.log.exception( "Error parsing hosts results" )
2272 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002273 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002274 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002275 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002276 if hosts[ controller ]:
2277 for host in hosts[ controller ]:
2278 if host is None or host.get( 'ipAddresses', [] ) == []:
2279 main.log.error(
2280 "Error with host ipAddresses on controller" +
2281 controllerStr + ": " + str( host ) )
2282 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002283 ports = []
2284 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002285 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002286 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002287 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002288 args=[ main.CLIs[i].ports, [ None ] ],
2289 kwargs= { 'sleep': 5, 'attempts': 5,
2290 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002291 threads.append( t )
2292 t.start()
2293
2294 for t in threads:
2295 t.join()
2296 ports.append( t.result )
2297 links = []
2298 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002299 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002300 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002301 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002302 args=[ main.CLIs[i].links, [ None ] ],
2303 kwargs= { 'sleep': 5, 'attempts': 5,
2304 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002305 threads.append( t )
2306 t.start()
2307
2308 for t in threads:
2309 t.join()
2310 links.append( t.result )
2311 clusters = []
2312 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002313 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002314 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002315 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002316 args=[ main.CLIs[i].clusters, [ None ] ],
2317 kwargs= { 'sleep': 5, 'attempts': 5,
2318 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002319 threads.append( t )
2320 t.start()
2321
2322 for t in threads:
2323 t.join()
2324 clusters.append( t.result )
2325
2326 elapsed = time.time() - startTime
2327 cliTime = time.time() - cliStart
2328 print "Elapsed time: " + str( elapsed )
2329 print "CLI time: " + str( cliTime )
2330
Jon Hall6e709752016-02-01 13:38:46 -08002331 if all( e is None for e in devices ) and\
2332 all( e is None for e in hosts ) and\
2333 all( e is None for e in ports ) and\
2334 all( e is None for e in links ) and\
2335 all( e is None for e in clusters ):
2336 topoFailMsg = "Could not get topology from ONOS"
2337 main.log.error( topoFailMsg )
2338 continue # Try again, No use trying to compare
2339
Jon Hall5cf14d52015-07-16 12:15:19 -07002340 mnSwitches = main.Mininet1.getSwitches()
2341 mnLinks = main.Mininet1.getLinks()
2342 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002343 for controller in range( len( main.activeNodes ) ):
2344 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002345 if devices[ controller ] and ports[ controller ] and\
2346 "Error" not in devices[ controller ] and\
2347 "Error" not in ports[ controller ]:
2348
Jon Hallc6793552016-01-19 14:18:37 -08002349 try:
2350 currentDevicesResult = main.Mininet1.compareSwitches(
2351 mnSwitches,
2352 json.loads( devices[ controller ] ),
2353 json.loads( ports[ controller ] ) )
2354 except ( TypeError, ValueError ) as e:
2355 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2356 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002357 else:
2358 currentDevicesResult = main.FALSE
2359 utilities.assert_equals( expect=main.TRUE,
2360 actual=currentDevicesResult,
2361 onpass="ONOS" + controllerStr +
2362 " Switches view is correct",
2363 onfail="ONOS" + controllerStr +
2364 " Switches view is incorrect" )
2365
2366 if links[ controller ] and "Error" not in links[ controller ]:
2367 currentLinksResult = main.Mininet1.compareLinks(
2368 mnSwitches, mnLinks,
2369 json.loads( links[ controller ] ) )
2370 else:
2371 currentLinksResult = main.FALSE
2372 utilities.assert_equals( expect=main.TRUE,
2373 actual=currentLinksResult,
2374 onpass="ONOS" + controllerStr +
2375 " links view is correct",
2376 onfail="ONOS" + controllerStr +
2377 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002378 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002379 currentHostsResult = main.Mininet1.compareHosts(
2380 mnHosts,
2381 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002382 elif hosts[ controller ] == []:
2383 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002384 else:
2385 currentHostsResult = main.FALSE
2386 utilities.assert_equals( expect=main.TRUE,
2387 actual=currentHostsResult,
2388 onpass="ONOS" + controllerStr +
2389 " hosts exist in Mininet",
2390 onfail="ONOS" + controllerStr +
2391 " hosts don't match Mininet" )
2392 # CHECKING HOST ATTACHMENT POINTS
2393 hostAttachment = True
2394 zeroHosts = False
2395 # FIXME: topo-HA/obelisk specific mappings:
2396 # key is mac and value is dpid
2397 mappings = {}
2398 for i in range( 1, 29 ): # hosts 1 through 28
2399 # set up correct variables:
2400 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2401 if i == 1:
2402 deviceId = "1000".zfill(16)
2403 elif i == 2:
2404 deviceId = "2000".zfill(16)
2405 elif i == 3:
2406 deviceId = "3000".zfill(16)
2407 elif i == 4:
2408 deviceId = "3004".zfill(16)
2409 elif i == 5:
2410 deviceId = "5000".zfill(16)
2411 elif i == 6:
2412 deviceId = "6000".zfill(16)
2413 elif i == 7:
2414 deviceId = "6007".zfill(16)
2415 elif i >= 8 and i <= 17:
2416 dpid = '3' + str( i ).zfill( 3 )
2417 deviceId = dpid.zfill(16)
2418 elif i >= 18 and i <= 27:
2419 dpid = '6' + str( i ).zfill( 3 )
2420 deviceId = dpid.zfill(16)
2421 elif i == 28:
2422 deviceId = "2800".zfill(16)
2423 mappings[ macId ] = deviceId
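                # For example, the loop above yields
                #   h1  -> macId "00:00:00:00:00:01", deviceId "0000000000001000"
                #   h10 -> macId "00:00:00:00:00:0A", deviceId "0000000000003010"
                #   h28 -> macId "00:00:00:00:00:1C", deviceId "0000000000002800"
                # i.e. every host is expected on port 1 of its access switch.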
Jon Halld8f6de82015-12-17 17:04:34 -08002424 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002425 if hosts[ controller ] == []:
2426 main.log.warn( "There are no hosts discovered" )
2427 zeroHosts = True
2428 else:
2429 for host in hosts[ controller ]:
2430 mac = None
2431 location = None
2432 device = None
2433 port = None
2434 try:
2435 mac = host.get( 'mac' )
2436 assert mac, "mac field could not be found for this host object"
2437
2438 location = host.get( 'location' )
2439 assert location, "location field could not be found for this host object"
2440
2441 # Trim the protocol identifier off deviceId
2442 device = str( location.get( 'elementId' ) ).split(':')[1]
2443 assert device, "elementId field could not be found for this host location object"
2444
2445 port = location.get( 'port' )
2446 assert port, "port field could not be found for this host location object"
2447
2448 # Now check if this matches where they should be
2449 if mac and device and port:
2450 if str( port ) != "1":
2451 main.log.error( "The attachment port is incorrect for " +
2452 "host " + str( mac ) +
2453 ". Expected: 1 Actual: " + str( port) )
2454 hostAttachment = False
2455 if device != mappings[ str( mac ) ]:
2456 main.log.error( "The attachment device is incorrect for " +
2457 "host " + str( mac ) +
2458 ". Expected: " + mappings[ str( mac ) ] +
2459 " Actual: " + device )
2460 hostAttachment = False
2461 else:
2462 hostAttachment = False
2463 except AssertionError:
2464 main.log.exception( "Json object not as expected" )
2465 main.log.error( repr( host ) )
2466 hostAttachment = False
2467 else:
2468 main.log.error( "No hosts json output or \"Error\"" +
2469 " in output. hosts = " +
2470 repr( hosts[ controller ] ) )
2471 if zeroHosts is False:
2472 hostAttachment = True
2473
2474 # END CHECKING HOST ATTACHMENT POINTS
2475 devicesResults = devicesResults and currentDevicesResult
2476 linksResults = linksResults and currentLinksResult
2477 hostsResults = hostsResults and currentHostsResult
2478 hostAttachmentResults = hostAttachmentResults and\
2479 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002480 topoResult = ( devicesResults and linksResults
2481 and hostsResults and ipResult and
2482 hostAttachmentResults )
Jon Halle9b1fa32015-12-08 15:32:21 -08002483 utilities.assert_equals( expect=True,
2484 actual=topoResult,
2485 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002486 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002487 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002488
2489 # Compare json objects for hosts and dataplane clusters
2490
2491 # hosts
2492 main.step( "Hosts view is consistent across all ONOS nodes" )
2493 consistentHostsResult = main.TRUE
2494 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002495 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002496 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002497 if hosts[ controller ] == hosts[ 0 ]:
2498 continue
2499 else: # hosts not consistent
2500 main.log.error( "hosts from ONOS" + controllerStr +
2501 " is inconsistent with ONOS1" )
2502 main.log.warn( repr( hosts[ controller ] ) )
2503 consistentHostsResult = main.FALSE
2504
2505 else:
2506 main.log.error( "Error in getting ONOS hosts from ONOS" +
2507 controllerStr )
2508 consistentHostsResult = main.FALSE
2509 main.log.warn( "ONOS" + controllerStr +
2510 " hosts response: " +
2511 repr( hosts[ controller ] ) )
2512 utilities.assert_equals(
2513 expect=main.TRUE,
2514 actual=consistentHostsResult,
2515 onpass="Hosts view is consistent across all ONOS nodes",
2516 onfail="ONOS nodes have different views of hosts" )
2517
2518 main.step( "Hosts information is correct" )
2519 hostsResults = hostsResults and ipResult
2520 utilities.assert_equals(
2521 expect=main.TRUE,
2522 actual=hostsResults,
2523 onpass="Host information is correct",
2524 onfail="Host information is incorrect" )
2525
2526 main.step( "Host attachment points to the network" )
2527 utilities.assert_equals(
2528 expect=True,
2529 actual=hostAttachmentResults,
2530 onpass="Hosts are correctly attached to the network",
2531 onfail="ONOS did not correctly attach hosts to the network" )
2532
2533 # Strongly connected clusters of devices
2534 main.step( "Clusters view is consistent across all ONOS nodes" )
2535 consistentClustersResult = main.TRUE
2536 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002537 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002538 if "Error" not in clusters[ controller ]:
2539 if clusters[ controller ] == clusters[ 0 ]:
2540 continue
2541 else: # clusters not consistent
2542 main.log.error( "clusters from ONOS" +
2543 controllerStr +
2544 " is inconsistent with ONOS1" )
2545 consistentClustersResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002546 else:
2547 main.log.error( "Error in getting dataplane clusters " +
2548 "from ONOS" + controllerStr )
2549 consistentClustersResult = main.FALSE
2550 main.log.warn( "ONOS" + controllerStr +
2551 " clusters response: " +
2552 repr( clusters[ controller ] ) )
2553 utilities.assert_equals(
2554 expect=main.TRUE,
2555 actual=consistentClustersResult,
2556 onpass="Clusters view is consistent across all ONOS nodes",
2557 onfail="ONOS nodes have different views of clusters" )
2558
2559 main.step( "There is only one SCC" )
2560 # there should always only be one cluster
2561 try:
2562 numClusters = len( json.loads( clusters[ 0 ] ) )
2563 except ( ValueError, TypeError ):
2564 main.log.exception( "Error parsing clusters[0]: " +
2565 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002566 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07002567 clusterResults = main.FALSE
2568 if numClusters == 1:
2569 clusterResults = main.TRUE
2570 utilities.assert_equals(
2571 expect=1,
2572 actual=numClusters,
2573 onpass="ONOS shows 1 SCC",
2574 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2575
2576 topoResult = ( devicesResults and linksResults
2577 and hostsResults and consistentHostsResult
2578 and consistentClustersResult and clusterResults
2579 and ipResult and hostAttachmentResults )
2580
2581 topoResult = topoResult and int( count <= 2 )
2582 note = "note it takes about " + str( int( cliTime ) ) + \
2583 " seconds for the test to make all the cli calls to fetch " +\
2584 "the topology from each ONOS instance"
2585 main.log.info(
2586             "Very rough estimate for topology discovery/convergence( " +
2587 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2588 str( count ) + " tries" )
2589
2590 main.step( "Device information is correct" )
2591 utilities.assert_equals(
2592 expect=main.TRUE,
2593 actual=devicesResults,
2594 onpass="Device information is correct",
2595 onfail="Device information is incorrect" )
2596
2597 main.step( "Links are correct" )
2598 utilities.assert_equals(
2599 expect=main.TRUE,
2600 actual=linksResults,
2601             onpass="Links are correct",
2602 onfail="Links are incorrect" )
2603
Jon Halla440e872016-03-31 15:15:50 -07002604 main.step( "Hosts are correct" )
2605 utilities.assert_equals(
2606 expect=main.TRUE,
2607 actual=hostsResults,
2608 onpass="Hosts are correct",
2609 onfail="Hosts are incorrect" )
2610
Jon Hall5cf14d52015-07-16 12:15:19 -07002611 # FIXME: move this to an ONOS state case
2612 main.step( "Checking ONOS nodes" )
2613 nodesOutput = []
2614 nodeResults = main.TRUE
2615 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002616 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002617 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002618 name="nodes-" + str( i ),
2619 args=[ ] )
2620 threads.append( t )
2621 t.start()
2622
2623 for t in threads:
2624 t.join()
2625 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002626 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002627 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002628 for i in nodesOutput:
2629 try:
2630 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002631 activeIps = []
2632 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002633 for node in current:
Jon Hallbd182782016-03-28 16:42:22 -07002634 if node['state'] == 'READY':
Jon Halle9b1fa32015-12-08 15:32:21 -08002635 activeIps.append( node['ip'] )
2636 activeIps.sort()
2637 if ips == activeIps:
2638 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002639 except ( ValueError, TypeError ):
2640 main.log.error( "Error parsing nodes output" )
2641 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002642 currentResult = main.FALSE
2643 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002644 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2645 onpass="Nodes check successful",
2646 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002647 if not nodeResults:
2648 for cli in main.CLIs:
2649 main.log.debug( "{} components not ACTIVE: \n{}".format(
2650 cli.name,
2651 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002652
2653 def CASE9( self, main ):
2654 """
2655 Link s3-s28 down
2656 """
2657 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002658 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002659 assert main, "main not defined"
2660 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002661 assert main.CLIs, "main.CLIs not defined"
2662 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002663 # NOTE: You should probably run a topology check after this
2664
2665 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2666
2667 description = "Turn off a link to ensure that Link Discovery " +\
2668 "is working properly"
2669 main.case( description )
2670
2671 main.step( "Kill Link between s3 and s28" )
2672 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2673 main.log.info( "Waiting " + str( linkSleep ) +
2674 " seconds for link down to be discovered" )
2675 time.sleep( linkSleep )
2676 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2677 onpass="Link down successful",
2678 onfail="Failed to bring link down" )
2679 # TODO do some sort of check here
2680
2681 def CASE10( self, main ):
2682 """
2683 Link s3-s28 up
2684 """
2685 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002686 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002687 assert main, "main not defined"
2688 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002689 assert main.CLIs, "main.CLIs not defined"
2690 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002691 # NOTE: You should probably run a topology check after this
2692
2693 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2694
2695 description = "Restore a link to ensure that Link Discovery is " + \
2696 "working properly"
2697 main.case( description )
2698
2699 main.step( "Bring link between s3 and s28 back up" )
2700 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2701 main.log.info( "Waiting " + str( linkSleep ) +
2702 " seconds for link up to be discovered" )
2703 time.sleep( linkSleep )
2704 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2705 onpass="Link up successful",
2706 onfail="Failed to bring link up" )
2707 # TODO do some sort of check here
2708
2709 def CASE11( self, main ):
2710 """
2711 Switch Down
2712 """
2713 # NOTE: You should probably run a topology check after this
2714 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002715 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002716 assert main, "main not defined"
2717 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002718 assert main.CLIs, "main.CLIs not defined"
2719 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002720
2721 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2722
2723 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002724 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002725 main.case( description )
2726 switch = main.params[ 'kill' ][ 'switch' ]
2727 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2728
2729 # TODO: Make this switch parameterizable
2730 main.step( "Kill " + switch )
2731 main.log.info( "Deleting " + switch )
2732 main.Mininet1.delSwitch( switch )
2733 main.log.info( "Waiting " + str( switchSleep ) +
2734 " seconds for switch down to be discovered" )
2735 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002736 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002737 # Peek at the deleted switch
2738 main.log.warn( str( device ) )
2739 result = main.FALSE
2740 if device and device[ 'available' ] is False:
2741 result = main.TRUE
2742 utilities.assert_equals( expect=main.TRUE, actual=result,
2743 onpass="Kill switch successful",
2744 onfail="Failed to kill switch?" )
2745
2746 def CASE12( self, main ):
2747 """
2748 Switch Up
2749 """
2750 # NOTE: You should probably run a topology check after this
2751 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002752 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002753 assert main, "main not defined"
2754 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002755 assert main.CLIs, "main.CLIs not defined"
2756 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002757 assert ONOS1Port, "ONOS1Port not defined"
2758 assert ONOS2Port, "ONOS2Port not defined"
2759 assert ONOS3Port, "ONOS3Port not defined"
2760 assert ONOS4Port, "ONOS4Port not defined"
2761 assert ONOS5Port, "ONOS5Port not defined"
2762 assert ONOS6Port, "ONOS6Port not defined"
2763 assert ONOS7Port, "ONOS7Port not defined"
2764
2765 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2766 switch = main.params[ 'kill' ][ 'switch' ]
2767 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2768 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002769 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002770 description = "Adding a switch to ensure it is discovered correctly"
2771 main.case( description )
2772
2773 main.step( "Add back " + switch )
2774 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2775 for peer in links:
2776 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002777 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002778 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2779 main.log.info( "Waiting " + str( switchSleep ) +
2780 " seconds for switch up to be discovered" )
2781 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002782 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002783 # Peek at the deleted switch
2784 main.log.warn( str( device ) )
2785 result = main.FALSE
2786 if device and device[ 'available' ]:
2787 result = main.TRUE
2788 utilities.assert_equals( expect=main.TRUE, actual=result,
2789 onpass="add switch successful",
2790 onfail="Failed to add switch?" )
2791
2792 def CASE13( self, main ):
2793 """
2794 Clean up
2795 """
2796 import os
2797 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002798 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002799 assert main, "main not defined"
2800 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002801 assert main.CLIs, "main.CLIs not defined"
2802 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002803
2804 # printing colors to terminal
2805 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2806 'blue': '\033[94m', 'green': '\033[92m',
2807 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2808 main.case( "Test Cleanup" )
2809 main.step( "Killing tcpdumps" )
2810 main.Mininet2.stopTcpdump()
2811
2812 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002813 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002814 main.step( "Copying MN pcap and ONOS log files to test station" )
2815 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2816 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002817 # NOTE: MN Pcap file is being saved to logdir.
2818 # We scp this file as MN and TestON aren't necessarily the same vm
2819
2820 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002821 # TODO: Load these from params
2822 # NOTE: must end in /
2823 logFolder = "/opt/onos/log/"
2824 logFiles = [ "karaf.log", "karaf.log.1" ]
2825 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002826 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002827 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002828 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002829 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2830 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002831 # std*.log's
2832 # NOTE: must end in /
2833 logFolder = "/opt/onos/var/"
2834 logFiles = [ "stderr.log", "stdout.log" ]
2835 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002836 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002837 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002838 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002839 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2840 logFolder + f, dstName )
2841 else:
2842 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002843
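# --- Refactoring sketch (illustrative only): the two log-copy loops above
# could be collapsed into one data-driven helper. It reuses only the calls
# and node fields already used in this case (secureCopy, user_name,
# ip_address, main.logdir).
def backupOnosLogs( logSpecs ):
    """logSpecs: list of ( folder, [ file, ... ] ) tuples; folders end in /."""
    for folder, files in logSpecs:
        for f in files:
            for node in main.nodes:
                dstName = main.logdir + "/" + node.name + "-" + f
                main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                           folder + f, dstName )
# Example call (illustrative only):
# backupOnosLogs( [ ( "/opt/onos/log/", [ "karaf.log", "karaf.log.1" ] ),
#                   ( "/opt/onos/var/", [ "stderr.log", "stdout.log" ] ) ] )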
2844 main.step( "Stopping Mininet" )
2845 mnResult = main.Mininet1.stopNet()
2846 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2847 onpass="Mininet stopped",
2848 onfail="MN cleanup NOT successful" )
2849
2850 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002851 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002852 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2853 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002854
2855 try:
2856 timerLog = open( main.logdir + "/Timers.csv", 'w')
2857 # Overwrite with empty line and close
2858 labels = "Gossip Intents, Restart"
2859 data = str( gossipTime ) + ", " + str( main.restartTime )
2860 timerLog.write( labels + "\n" + data )
2861 timerLog.close()
2862 except NameError, e:
2863 main.log.exception(e)
2864
2865 def CASE14( self, main ):
2866 """
2867 start election app on all onos nodes
2868 """
Jon Halle1a3b752015-07-22 13:02:46 -07002869 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002870 assert main, "main not defined"
2871 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002872 assert main.CLIs, "main.CLIs not defined"
2873 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002874
2875 main.case("Start Leadership Election app")
2876 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002877 onosCli = main.CLIs[ main.activeNodes[0] ]
2878 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002879 utilities.assert_equals(
2880 expect=main.TRUE,
2881 actual=appResult,
2882 onpass="Election app installed",
2883 onfail="Something went wrong with installing Leadership election" )
2884
2885 main.step( "Run for election on each node" )
2886 leaderResult = main.TRUE
2887 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002888 for i in main.activeNodes:
2889 main.CLIs[i].electionTestRun()
2890 for i in main.activeNodes:
2891 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002892 leader = cli.electionTestLeader()
2893 if leader is None or leader == main.FALSE:
2894 main.log.error( cli.name + ": Leader for the election app " +
2895 "should be an ONOS node, instead got '" +
2896 str( leader ) + "'" )
2897 leaderResult = main.FALSE
2898 leaders.append( leader )
2899 utilities.assert_equals(
2900 expect=main.TRUE,
2901 actual=leaderResult,
2902 onpass="Successfully ran for leadership",
2903 onfail="Failed to run for leadership" )
2904
2905 main.step( "Check that each node shows the same leader" )
2906 sameLeader = main.TRUE
2907 if len( set( leaders ) ) != 1:
2908 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002909 main.log.error( "Results of electionTestLeader in the order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002910 str( leaders ) )
2911 utilities.assert_equals(
2912 expect=main.TRUE,
2913 actual=sameLeader,
2914 onpass="Leadership is consistent for the election topic",
2915 onfail="Nodes have different leaders" )
2916
2917 def CASE15( self, main ):
2918 """
2919 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002920 15.1 Run election on each node
2921 15.2 Check that each node has the same leaders and candidates
2922 15.3 Find current leader and withdraw
2923 15.4 Check that a new node was elected leader
2924 15.5 Check that the new leader was the candidate of the old leader
2925 15.6 Run for election on old leader
2926 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2927 15.8 Make sure that the old leader was added to the candidate list
2928
2929 old and new variable prefixes refer to data from before vs after
2930 withdrawal, and later before withdrawal vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002931 """
2932 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002933 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002934 assert main, "main not defined"
2935 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002936 assert main.CLIs, "main.CLIs not defined"
2937 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002938
Jon Hall5cf14d52015-07-16 12:15:19 -07002939 description = "Check that Leadership Election is still functional"
2940 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002941 # NOTE: Need to re-run after restarts since being a candidate is not persistent
Jon Hall5cf14d52015-07-16 12:15:19 -07002942
Jon Halla440e872016-03-31 15:15:50 -07002943 oldLeaders = [] # list of lists of each node's candidates before
2944 newLeaders = [] # list of lists of each node's candidates after
acsmars71adceb2015-08-31 15:09:26 -07002945 oldLeader = '' # the old leader from oldLeaders, None if not same
2946 newLeader = '' # the new leader from newLeaders, None if not same
2947 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2948 expectNoLeader = False # True when there is only one leader
2949 if main.numCtrls == 1:
2950 expectNoLeader = True
2951
2952 main.step( "Run for election on each node" )
2953 electionResult = main.TRUE
2954
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002955 for i in main.activeNodes: # run test election on each node
2956 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002957 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002958 utilities.assert_equals(
2959 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002960 actual=electionResult,
2961 onpass="All nodes successfully ran for leadership",
2962 onfail="At least one node failed to run for leadership" )
2963
acsmars3a72bde2015-09-02 14:16:22 -07002964 if electionResult == main.FALSE:
2965 main.log.error(
2966 "Skipping Test Case because Election Test App isn't loaded" )
2967 main.skipCase()
2968
acsmars71adceb2015-08-31 15:09:26 -07002969 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002970 failMessage = "Nodes have different leaderboards"
2971 def consistentLeaderboards( nodes ):
2972 TOPIC = 'org.onosproject.election'
2973 # FIXME: use threads
2974 #FIXME: should we retry outside the function?
2975 for n in range( 5 ): # Retry in case election is still happening
2976 leaderList = []
2977 # Get all leaderboards
2978 for cli in nodes:
2979 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
2980 # Compare leaderboards
2981 result = all( i == leaderList[0] for i in leaderList ) and\
2982 leaderList is not None
2983 main.log.debug( leaderList )
2984 main.log.warn( result )
2985 if result:
2986 return ( result, leaderList )
2987 time.sleep(5) #TODO: paramerterize
2988 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
2989 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2990 sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
2991 if sameResult:
2992 oldLeader = oldLeaders[ 0 ][ 0 ]
2993 main.log.warn( oldLeader )
acsmars71adceb2015-08-31 15:09:26 -07002994 else:
Jon Halla440e872016-03-31 15:15:50 -07002995 oldLeader = None
acsmars71adceb2015-08-31 15:09:26 -07002996 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07002997 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07002998 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07002999 onpass="Leaderboards are consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07003000 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07003001
3002 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07003003 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07003004 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07003005 if oldLeader is None:
3006 main.log.error( "Leadership isn't consistent." )
3007 withdrawResult = main.FALSE
3008 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003009 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07003010 if oldLeader == main.nodes[ i ].ip_address:
3011 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003012 break
3013 else: # FOR/ELSE statement
3014 main.log.error( "Leader election, could not find current leader" )
3015 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07003016 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07003017 utilities.assert_equals(
3018 expect=main.TRUE,
3019 actual=withdrawResult,
3020 onpass="Node was withdrawn from election",
3021 onfail="Node was not withdrawn from election" )
3022
acsmars71adceb2015-08-31 15:09:26 -07003023 main.step( "Check that a new node was elected leader" )
acsmars71adceb2015-08-31 15:09:26 -07003024 failMessage = "Nodes have different leaders"
acsmars71adceb2015-08-31 15:09:26 -07003025 # Get new leaders and candidates
Jon Halla440e872016-03-31 15:15:50 -07003026 newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
3027 if newLeaders[ 0 ][ 0 ] == 'none':
3028 main.log.error( "No leader was elected on at least 1 node" )
3029 if not expectNoLeader:
3030 newLeaderResult = False
3031 if newLeaderResult:
3032 newLeader = newLeaders[ 0 ][ 0 ]
Jon Hall5cf14d52015-07-16 12:15:19 -07003033 else:
Jon Halla440e872016-03-31 15:15:50 -07003034 newLeader = None
acsmars71adceb2015-08-31 15:09:26 -07003035
3036 # Check that the new leader is not the older leader, which was withdrawn
3037 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003038 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003039 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07003040 " as the current leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003041 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003042 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003043 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003044 onpass="Leadership election passed",
3045 onfail="Something went wrong with Leadership election" )
3046
Jon Halla440e872016-03-31 15:15:50 -07003047 main.step( "Check that the new leader was the candidate of the old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003048 # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07003049 correctCandidateResult = main.TRUE
3050 if expectNoLeader:
3051 if newLeader == 'none':
3052 main.log.info( "No leader expected. None found. Pass" )
3053 correctCandidateResult = main.TRUE
3054 else:
3055 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3056 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003057 elif len( oldLeaders[0] ) >= 3:
3058 if newLeader == oldLeaders[ 0 ][ 2 ]:
3059 # correct leader was elected
3060 correctCandidateResult = main.TRUE
3061 else:
3062 correctCandidateResult = main.FALSE
3063 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3064 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003065 else:
3066 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003067 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003068 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003069 utilities.assert_equals(
3070 expect=main.TRUE,
3071 actual=correctCandidateResult,
3072 onpass="Correct Candidate Elected",
3073 onfail="Incorrect Candidate Elected" )
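# --- Illustration of the assumed leaderboard layout (an assumption about
# the CLI output, not verified here): index 0 is the current leader and
# the remaining entries are the candidate queue with the leader repeated
# at its head, so index 2 is next in line once the leader withdraws.
exampleOldBoard = [ 'ip1', 'ip1', 'ip2', 'ip3' ]  # illustrative values only
exampleExpectedNewLeader = exampleOldBoard[ 2 ]   # 'ip2', mirroring the check above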
3074
Jon Hall5cf14d52015-07-16 12:15:19 -07003075 main.step( "Run for election on old leader( just so everyone " +
3076 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003077 if oldLeaderCLI is not None:
3078 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003079 else:
acsmars71adceb2015-08-31 15:09:26 -07003080 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003081 runResult = main.FALSE
3082 utilities.assert_equals(
3083 expect=main.TRUE,
3084 actual=runResult,
3085 onpass="App re-ran for election",
3086 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003087
acsmars71adceb2015-08-31 15:09:26 -07003088 main.step(
3089 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003090 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003091 # Get new leaders and candidates
3092 reRunLeaders = []
3093 time.sleep( 5 ) # TODO: parameterize
3094 positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
acsmars71adceb2015-08-31 15:09:26 -07003095
3096 # Check that the re-elected node is last on the candidate List
Jon Halla440e872016-03-31 15:15:50 -07003097 if oldLeader != reRunLeaders[ 0 ][ -1 ]:
3098 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
3099 str( reRunLeaders[ 0 ] ) ) )
acsmars71adceb2015-08-31 15:09:26 -07003100 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003101
3102 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003103 expect=True,
acsmars71adceb2015-08-31 15:09:26 -07003104 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003105 onpass="Old leader successfully re-ran for election",
3106 onfail="Something went wrong with Leadership election after " +
3107 "the old leader re-ran for election" )
3108
3109 def CASE16( self, main ):
3110 """
3111 Install Distributed Primitives app
3112 """
3113 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003114 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003115 assert main, "main not defined"
3116 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003117 assert main.CLIs, "main.CLIs not defined"
3118 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003119
3120 # Variables for the distributed primitives tests
3121 global pCounterName
Jon Hall5cf14d52015-07-16 12:15:19 -07003122 global pCounterValue
Jon Hall5cf14d52015-07-16 12:15:19 -07003123 global onosSet
3124 global onosSetName
3125 pCounterName = "TestON-Partitions"
Jon Hall5cf14d52015-07-16 12:15:19 -07003126 pCounterValue = 0
Jon Hall5cf14d52015-07-16 12:15:19 -07003127 onosSet = set([])
3128 onosSetName = "TestON-set"
3129
3130 description = "Install Primitives app"
3131 main.case( description )
3132 main.step( "Install Primitives app" )
3133 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003134 node = main.activeNodes[0]
3135 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003136 utilities.assert_equals( expect=main.TRUE,
3137 actual=appResults,
3138 onpass="Primitives app activated",
3139 onfail="Primitives app not activated" )
3140 time.sleep( 5 ) # To allow all nodes to activate
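# --- Hedged alternative to the fixed sleep above (sketch only, not called).
# Assumption: the CLI driver exposes appStatus( <app name> ) returning a
# state string such as "ACTIVE"; if it does not, the output of a generic
# apps listing would have to be parsed instead.
def waitForAppActivation( appName, timeout=30 ):
    """Poll every active node until appName is ACTIVE or timeout seconds pass."""
    for _ in range( timeout ):
        states = [ main.CLIs[ i ].appStatus( appName )
                   for i in main.activeNodes ]
        if all( state == "ACTIVE" for state in states ):
            return main.TRUE
        time.sleep( 1 )
    return main.FALSE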
3141
3142 def CASE17( self, main ):
3143 """
3144 Check for basic functionality with distributed primitives
3145 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003146 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003147 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003148 assert main, "main not defined"
3149 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003150 assert main.CLIs, "main.CLIs not defined"
3151 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003152 assert pCounterName, "pCounterName not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003153 assert onosSetName, "onosSetName not defined"
3154 # NOTE: assert fails if value is 0/None/Empty/False
3155 try:
3156 pCounterValue
3157 except NameError:
3158 main.log.error( "pCounterValue not defined, setting to 0" )
3159 pCounterValue = 0
3160 try:
Jon Hall5cf14d52015-07-16 12:15:19 -07003161 onosSet
3162 except NameError:
3163 main.log.error( "onosSet not defined, setting to empty Set" )
3164 onosSet = set([])
3165 # Variables for the distributed primitives tests. These are local only
3166 addValue = "a"
3167 addAllValue = "a b c d e f"
3168 retainValue = "c d e f"
3169
3170 description = "Check for basic functionality with distributed " +\
3171 "primitives"
3172 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003173 main.caseExplanation = "Test the methods of the distributed " +\
3174 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003175 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003176 # Partitioned counters
3177 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003178 pCounters = []
3179 threads = []
3180 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003181 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003182 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3183 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003184 args=[ pCounterName ] )
3185 pCounterValue += 1
3186 addedPValues.append( pCounterValue )
3187 threads.append( t )
3188 t.start()
3189
3190 for t in threads:
3191 t.join()
3192 pCounters.append( t.result )
3193 # Check that counter incremented numController times
3194 pCounterResults = True
3195 for i in addedPValues:
3196 tmpResult = i in pCounters
3197 pCounterResults = pCounterResults and tmpResult
3198 if not tmpResult:
3199 main.log.error( str( i ) + " is not in partitioned "
3200 "counter incremented results" )
3201 utilities.assert_equals( expect=True,
3202 actual=pCounterResults,
3203 onpass="Default counter incremented",
3204 onfail="Error incrementing default" +
3205 " counter" )
3206
Jon Halle1a3b752015-07-22 13:02:46 -07003207 main.step( "Get then Increment a default counter on each node" )
3208 pCounters = []
3209 threads = []
3210 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003211 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003212 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3213 name="counterGetAndAdd-" + str( i ),
3214 args=[ pCounterName ] )
3215 addedPValues.append( pCounterValue )
3216 pCounterValue += 1
3217 threads.append( t )
3218 t.start()
3219
3220 for t in threads:
3221 t.join()
3222 pCounters.append( t.result )
3223 # Check that counter incremented numController times
3224 pCounterResults = True
3225 for i in addedPValues:
3226 tmpResult = i in pCounters
3227 pCounterResults = pCounterResults and tmpResult
3228 if not tmpResult:
3229 main.log.error( str( i ) + " is not in partitioned "
3230 "counter incremented results" )
3231 utilities.assert_equals( expect=True,
3232 actual=pCounterResults,
3233 onpass="Default counter incremented",
3234 onfail="Error incrementing default" +
3235 " counter" )
3236
3237 main.step( "Counters we added have the correct values" )
3238 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3239 utilities.assert_equals( expect=main.TRUE,
3240 actual=incrementCheck,
3241 onpass="Added counters are correct",
3242 onfail="Added counters are incorrect" )
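# --- Illustration (pure local Python, no ONOS calls) of the bookkeeping
# used above: counterTestAddAndGet is assumed to return the value AFTER
# the increment, counterTestGetAndAdd the value BEFORE it, so the expected
# value is recorded after or before bumping pCounterValue respectively.
def expectedCounterValues( start, nodes, postIncrement ):
    """Expected per-node responses for one round of +1 increments."""
    expected = []
    value = start
    for _ in range( nodes ):
        if postIncrement:       # addAndGet: record the value after the +1
            value += 1
            expected.append( value )
        else:                   # getAndAdd: record the value before the +1
            expected.append( value )
            value += 1
    return expected             # either way the counter ends at start + nodes
# expectedCounterValues( 0, 3, True )  -> [ 1, 2, 3 ]
# expectedCounterValues( 0, 3, False ) -> [ 0, 1, 2 ]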
3243
3244 main.step( "Add -8 to then get a default counter on each node" )
3245 pCounters = []
3246 threads = []
3247 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003248 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003249 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3250 name="counterIncrement-" + str( i ),
3251 args=[ pCounterName ],
3252 kwargs={ "delta": -8 } )
3253 pCounterValue += -8
3254 addedPValues.append( pCounterValue )
3255 threads.append( t )
3256 t.start()
3257
3258 for t in threads:
3259 t.join()
3260 pCounters.append( t.result )
3261 # Check that counter incremented numController times
3262 pCounterResults = True
3263 for i in addedPValues:
3264 tmpResult = i in pCounters
3265 pCounterResults = pCounterResults and tmpResult
3266 if not tmpResult:
3267 main.log.error( str( i ) + " is not in partitioned "
3268 "counter incremented results" )
3269 utilities.assert_equals( expect=True,
3270 actual=pCounterResults,
3271 onpass="Default counter incremented",
3272 onfail="Error incrementing default" +
3273 " counter" )
3274
3275 main.step( "Add 5 to then get a default counter on each node" )
3276 pCounters = []
3277 threads = []
3278 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003279 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003280 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3281 name="counterIncrement-" + str( i ),
3282 args=[ pCounterName ],
3283 kwargs={ "delta": 5 } )
3284 pCounterValue += 5
3285 addedPValues.append( pCounterValue )
3286 threads.append( t )
3287 t.start()
3288
3289 for t in threads:
3290 t.join()
3291 pCounters.append( t.result )
3292 # Check that counter incremented numController times
3293 pCounterResults = True
3294 for i in addedPValues:
3295 tmpResult = i in pCounters
3296 pCounterResults = pCounterResults and tmpResult
3297 if not tmpResult:
3298 main.log.error( str( i ) + " is not in partitioned "
3299 "counter incremented results" )
3300 utilities.assert_equals( expect=True,
3301 actual=pCounterResults,
3302 onpass="Default counter incremented",
3303 onfail="Error incrementing default" +
3304 " counter" )
3305
3306 main.step( "Get then add 5 to a default counter on each node" )
3307 pCounters = []
3308 threads = []
3309 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003310 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003311 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3312 name="counterIncrement-" + str( i ),
3313 args=[ pCounterName ],
3314 kwargs={ "delta": 5 } )
3315 addedPValues.append( pCounterValue )
3316 pCounterValue += 5
3317 threads.append( t )
3318 t.start()
3319
3320 for t in threads:
3321 t.join()
3322 pCounters.append( t.result )
3323 # Check that counter incremented numController times
3324 pCounterResults = True
3325 for i in addedPValues:
3326 tmpResult = i in pCounters
3327 pCounterResults = pCounterResults and tmpResult
3328 if not tmpResult:
3329 main.log.error( str( i ) + " is not in partitioned "
3330 "counter incremented results" )
3331 utilities.assert_equals( expect=True,
3332 actual=pCounterResults,
3333 onpass="Default counter incremented",
3334 onfail="Error incrementing default" +
3335 " counter" )
3336
3337 main.step( "Counters we added have the correct values" )
3338 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3339 utilities.assert_equals( expect=main.TRUE,
3340 actual=incrementCheck,
3341 onpass="Added counters are correct",
3342 onfail="Added counters are incorrect" )
3343
Jon Hall5cf14d52015-07-16 12:15:19 -07003344 # DISTRIBUTED SETS
3345 main.step( "Distributed Set get" )
3346 size = len( onosSet )
3347 getResponses = []
3348 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003349 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003350 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003351 name="setTestGet-" + str( i ),
3352 args=[ onosSetName ] )
3353 threads.append( t )
3354 t.start()
3355 for t in threads:
3356 t.join()
3357 getResponses.append( t.result )
3358
3359 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003360 for i in range( len( main.activeNodes ) ):
3361 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003362 if isinstance( getResponses[ i ], list):
3363 current = set( getResponses[ i ] )
3364 if len( current ) == len( getResponses[ i ] ):
3365 # no repeats
3366 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003367 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003368 " has incorrect view" +
3369 " of set " + onosSetName + ":\n" +
3370 str( getResponses[ i ] ) )
3371 main.log.debug( "Expected: " + str( onosSet ) )
3372 main.log.debug( "Actual: " + str( current ) )
3373 getResults = main.FALSE
3374 else:
3375 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003376 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003377 " has repeat elements in" +
3378 " set " + onosSetName + ":\n" +
3379 str( getResponses[ i ] ) )
3380 getResults = main.FALSE
3381 elif getResponses[ i ] == main.ERROR:
3382 getResults = main.FALSE
3383 utilities.assert_equals( expect=main.TRUE,
3384 actual=getResults,
3385 onpass="Set elements are correct",
3386 onfail="Set elements are incorrect" )
3387
3388 main.step( "Distributed Set size" )
3389 sizeResponses = []
3390 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003391 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003392 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003393 name="setTestSize-" + str( i ),
3394 args=[ onosSetName ] )
3395 threads.append( t )
3396 t.start()
3397 for t in threads:
3398 t.join()
3399 sizeResponses.append( t.result )
3400
3401 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003402 for i in range( len( main.activeNodes ) ):
3403 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003404 if size != sizeResponses[ i ]:
3405 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003406 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003407 " expected a size of " + str( size ) +
3408 " for set " + onosSetName +
3409 " but got " + str( sizeResponses[ i ] ) )
3410 utilities.assert_equals( expect=main.TRUE,
3411 actual=sizeResults,
3412 onpass="Set sizes are correct",
3413 onfail="Set sizes are incorrect" )
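# --- Refactoring sketch (not wired in): the get/size verification repeated
# after every mutation below could be factored into one helper. It reuses
# only calls already used in this case (setTestGet, setTestSize), but runs
# them sequentially rather than in threads for brevity.
def verifySetState( expectedSet, setName ):
    """Return main.TRUE if every active node reports exactly expectedSet."""
    result = main.TRUE
    for i in main.activeNodes:
        node = str( i + 1 )
        getResponse = main.CLIs[ i ].setTestGet( setName )
        sizeResponse = main.CLIs[ i ].setTestSize( setName )
        if not isinstance( getResponse, list ) or \
                len( getResponse ) != len( expectedSet ) or \
                set( getResponse ) != expectedSet:
            main.log.error( "ONOS" + node + " has an incorrect view of " +
                            setName + ": " + str( getResponse ) )
            result = main.FALSE
        if sizeResponse != len( expectedSet ):
            main.log.error( "ONOS" + node + " reports size " +
                            str( sizeResponse ) + " for " + setName )
            result = main.FALSE
    return result
# e.g. verifySetState( onosSet, onosSetName ) after each add/remove step.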
3414
3415 main.step( "Distributed Set add()" )
3416 onosSet.add( addValue )
3417 addResponses = []
3418 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003419 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003420 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003421 name="setTestAdd-" + str( i ),
3422 args=[ onosSetName, addValue ] )
3423 threads.append( t )
3424 t.start()
3425 for t in threads:
3426 t.join()
3427 addResponses.append( t.result )
3428
3429 # main.TRUE = successfully changed the set
3430 # main.FALSE = action resulted in no change in set
3431 # main.ERROR - Some error in executing the function
3432 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003433 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003434 if addResponses[ i ] == main.TRUE:
3435 # All is well
3436 pass
3437 elif addResponses[ i ] == main.FALSE:
3438 # Already in set, probably fine
3439 pass
3440 elif addResponses[ i ] == main.ERROR:
3441 # Error in execution
3442 addResults = main.FALSE
3443 else:
3444 # unexpected result
3445 addResults = main.FALSE
3446 if addResults != main.TRUE:
3447 main.log.error( "Error executing set add" )
3448
3449 # Check if set is still correct
3450 size = len( onosSet )
3451 getResponses = []
3452 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003453 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003454 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003455 name="setTestGet-" + str( i ),
3456 args=[ onosSetName ] )
3457 threads.append( t )
3458 t.start()
3459 for t in threads:
3460 t.join()
3461 getResponses.append( t.result )
3462 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003463 for i in range( len( main.activeNodes ) ):
3464 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003465 if isinstance( getResponses[ i ], list):
3466 current = set( getResponses[ i ] )
3467 if len( current ) == len( getResponses[ i ] ):
3468 # no repeats
3469 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003470 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003471 " of set " + onosSetName + ":\n" +
3472 str( getResponses[ i ] ) )
3473 main.log.debug( "Expected: " + str( onosSet ) )
3474 main.log.debug( "Actual: " + str( current ) )
3475 getResults = main.FALSE
3476 else:
3477 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003478 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003479 " set " + onosSetName + ":\n" +
3480 str( getResponses[ i ] ) )
3481 getResults = main.FALSE
3482 elif getResponses[ i ] == main.ERROR:
3483 getResults = main.FALSE
3484 sizeResponses = []
3485 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003486 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003487 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003488 name="setTestSize-" + str( i ),
3489 args=[ onosSetName ] )
3490 threads.append( t )
3491 t.start()
3492 for t in threads:
3493 t.join()
3494 sizeResponses.append( t.result )
3495 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003496 for i in range( len( main.activeNodes ) ):
3497 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003498 if size != sizeResponses[ i ]:
3499 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003500 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003501 " expected a size of " + str( size ) +
3502 " for set " + onosSetName +
3503 " but got " + str( sizeResponses[ i ] ) )
3504 addResults = addResults and getResults and sizeResults
3505 utilities.assert_equals( expect=main.TRUE,
3506 actual=addResults,
3507 onpass="Set add correct",
3508 onfail="Set add was incorrect" )
3509
3510 main.step( "Distributed Set addAll()" )
3511 onosSet.update( addAllValue.split() )
3512 addResponses = []
3513 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003514 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003515 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003516 name="setTestAddAll-" + str( i ),
3517 args=[ onosSetName, addAllValue ] )
3518 threads.append( t )
3519 t.start()
3520 for t in threads:
3521 t.join()
3522 addResponses.append( t.result )
3523
3524 # main.TRUE = successfully changed the set
3525 # main.FALSE = action resulted in no change in set
3526 # main.ERROR - Some error in executing the function
3527 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003528 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003529 if addResponses[ i ] == main.TRUE:
3530 # All is well
3531 pass
3532 elif addResponses[ i ] == main.FALSE:
3533 # Already in set, probably fine
3534 pass
3535 elif addResponses[ i ] == main.ERROR:
3536 # Error in execution
3537 addAllResults = main.FALSE
3538 else:
3539 # unexpected result
3540 addAllResults = main.FALSE
3541 if addAllResults != main.TRUE:
3542 main.log.error( "Error executing set addAll" )
3543
3544 # Check if set is still correct
3545 size = len( onosSet )
3546 getResponses = []
3547 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003548 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003549 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003550 name="setTestGet-" + str( i ),
3551 args=[ onosSetName ] )
3552 threads.append( t )
3553 t.start()
3554 for t in threads:
3555 t.join()
3556 getResponses.append( t.result )
3557 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003558 for i in range( len( main.activeNodes ) ):
3559 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003560 if isinstance( getResponses[ i ], list):
3561 current = set( getResponses[ i ] )
3562 if len( current ) == len( getResponses[ i ] ):
3563 # no repeats
3564 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003565 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003566 " has incorrect view" +
3567 " of set " + onosSetName + ":\n" +
3568 str( getResponses[ i ] ) )
3569 main.log.debug( "Expected: " + str( onosSet ) )
3570 main.log.debug( "Actual: " + str( current ) )
3571 getResults = main.FALSE
3572 else:
3573 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003574 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003575 " has repeat elements in" +
3576 " set " + onosSetName + ":\n" +
3577 str( getResponses[ i ] ) )
3578 getResults = main.FALSE
3579 elif getResponses[ i ] == main.ERROR:
3580 getResults = main.FALSE
3581 sizeResponses = []
3582 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003583 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003584 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003585 name="setTestSize-" + str( i ),
3586 args=[ onosSetName ] )
3587 threads.append( t )
3588 t.start()
3589 for t in threads:
3590 t.join()
3591 sizeResponses.append( t.result )
3592 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003593 for i in range( len( main.activeNodes ) ):
3594 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003595 if size != sizeResponses[ i ]:
3596 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003597 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003598 " expected a size of " + str( size ) +
3599 " for set " + onosSetName +
3600 " but got " + str( sizeResponses[ i ] ) )
3601 addAllResults = addAllResults and getResults and sizeResults
3602 utilities.assert_equals( expect=main.TRUE,
3603 actual=addAllResults,
3604 onpass="Set addAll correct",
3605 onfail="Set addAll was incorrect" )
3606
3607 main.step( "Distributed Set contains()" )
3608 containsResponses = []
3609 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003610 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003611 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003612 name="setContains-" + str( i ),
3613 args=[ onosSetName ],
3614 kwargs={ "values": addValue } )
3615 threads.append( t )
3616 t.start()
3617 for t in threads:
3618 t.join()
3619 # NOTE: This is the tuple
3620 containsResponses.append( t.result )
3621
3622 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003623 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003624 if containsResponses[ i ] == main.ERROR:
3625 containsResults = main.FALSE
3626 else:
3627 containsResults = containsResults and\
3628 containsResponses[ i ][ 1 ]
3629 utilities.assert_equals( expect=main.TRUE,
3630 actual=containsResults,
3631 onpass="Set contains is functional",
3632 onfail="Set contains failed" )
3633
3634 main.step( "Distributed Set containsAll()" )
3635 containsAllResponses = []
3636 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003637 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003638 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003639 name="setContainsAll-" + str( i ),
3640 args=[ onosSetName ],
3641 kwargs={ "values": addAllValue } )
3642 threads.append( t )
3643 t.start()
3644 for t in threads:
3645 t.join()
3646 # NOTE: This is the tuple
3647 containsAllResponses.append( t.result )
3648
3649 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003650 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003651 if containsAllResponses[ i ] == main.ERROR:
3652 containsAllResults = main.FALSE
3653 else:
3654 containsAllResults = containsAllResults and\
3655 containsAllResponses[ i ][ 1 ]
3656 utilities.assert_equals( expect=main.TRUE,
3657 actual=containsAllResults,
3658 onpass="Set containsAll is functional",
3659 onfail="Set containsAll failed" )
3660
3661 main.step( "Distributed Set remove()" )
3662 onosSet.remove( addValue )
3663 removeResponses = []
3664 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003665 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003666 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003667 name="setTestRemove-" + str( i ),
3668 args=[ onosSetName, addValue ] )
3669 threads.append( t )
3670 t.start()
3671 for t in threads:
3672 t.join()
3673 removeResponses.append( t.result )
3674
3675 # main.TRUE = successfully changed the set
3676 # main.FALSE = action resulted in no change in set
3677 # main.ERROR - Some error in executing the function
3678 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003679 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003680 if removeResponses[ i ] == main.TRUE:
3681 # All is well
3682 pass
3683 elif removeResponses[ i ] == main.FALSE:
3684 # not in set, probably fine
3685 pass
3686 elif removeResponses[ i ] == main.ERROR:
3687 # Error in execution
3688 removeResults = main.FALSE
3689 else:
3690 # unexpected result
3691 removeResults = main.FALSE
3692 if removeResults != main.TRUE:
3693 main.log.error( "Error executing set remove" )
3694
3695 # Check if set is still correct
3696 size = len( onosSet )
3697 getResponses = []
3698 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003699 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003700 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003701 name="setTestGet-" + str( i ),
3702 args=[ onosSetName ] )
3703 threads.append( t )
3704 t.start()
3705 for t in threads:
3706 t.join()
3707 getResponses.append( t.result )
3708 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003709 for i in range( len( main.activeNodes ) ):
3710 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003711 if isinstance( getResponses[ i ], list):
3712 current = set( getResponses[ i ] )
3713 if len( current ) == len( getResponses[ i ] ):
3714 # no repeats
3715 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003716 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003717 " has incorrect view" +
3718 " of set " + onosSetName + ":\n" +
3719 str( getResponses[ i ] ) )
3720 main.log.debug( "Expected: " + str( onosSet ) )
3721 main.log.debug( "Actual: " + str( current ) )
3722 getResults = main.FALSE
3723 else:
3724 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003725 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003726 " has repeat elements in" +
3727 " set " + onosSetName + ":\n" +
3728 str( getResponses[ i ] ) )
3729 getResults = main.FALSE
3730 elif getResponses[ i ] == main.ERROR:
3731 getResults = main.FALSE
3732 sizeResponses = []
3733 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003734 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003735 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003736 name="setTestSize-" + str( i ),
3737 args=[ onosSetName ] )
3738 threads.append( t )
3739 t.start()
3740 for t in threads:
3741 t.join()
3742 sizeResponses.append( t.result )
3743 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003744 for i in range( len( main.activeNodes ) ):
3745 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003746 if size != sizeResponses[ i ]:
3747 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003748 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003749 " expected a size of " + str( size ) +
3750 " for set " + onosSetName +
3751 " but got " + str( sizeResponses[ i ] ) )
3752 removeResults = removeResults and getResults and sizeResults
3753 utilities.assert_equals( expect=main.TRUE,
3754 actual=removeResults,
3755 onpass="Set remove correct",
3756 onfail="Set remove was incorrect" )
3757
3758 main.step( "Distributed Set removeAll()" )
3759 onosSet.difference_update( addAllValue.split() )
3760 removeAllResponses = []
3761 threads = []
3762 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003763 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003764 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003765 name="setTestRemoveAll-" + str( i ),
3766 args=[ onosSetName, addAllValue ] )
3767 threads.append( t )
3768 t.start()
3769 for t in threads:
3770 t.join()
3771 removeAllResponses.append( t.result )
3772 except Exception, e:
3773 main.log.exception(e)
3774
3775 # main.TRUE = successfully changed the set
3776 # main.FALSE = action resulted in no change in set
3777 # main.ERROR - Some error in executing the function
3778 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003779 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003780 if removeAllResponses[ i ] == main.TRUE:
3781 # All is well
3782 pass
3783 elif removeAllResponses[ i ] == main.FALSE:
3784 # not in set, probably fine
3785 pass
3786 elif removeAllResponses[ i ] == main.ERROR:
3787 # Error in execution
3788 removeAllResults = main.FALSE
3789 else:
3790 # unexpected result
3791 removeAllResults = main.FALSE
3792 if removeAllResults != main.TRUE:
3793 main.log.error( "Error executing set removeAll" )
3794
3795 # Check if set is still correct
3796 size = len( onosSet )
3797 getResponses = []
3798 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003799 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003800 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003801 name="setTestGet-" + str( i ),
3802 args=[ onosSetName ] )
3803 threads.append( t )
3804 t.start()
3805 for t in threads:
3806 t.join()
3807 getResponses.append( t.result )
3808 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003809 for i in range( len( main.activeNodes ) ):
3810 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003811 if isinstance( getResponses[ i ], list):
3812 current = set( getResponses[ i ] )
3813 if len( current ) == len( getResponses[ i ] ):
3814 # no repeats
3815 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003816 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003817 " has incorrect view" +
3818 " of set " + onosSetName + ":\n" +
3819 str( getResponses[ i ] ) )
3820 main.log.debug( "Expected: " + str( onosSet ) )
3821 main.log.debug( "Actual: " + str( current ) )
3822 getResults = main.FALSE
3823 else:
3824 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003825 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003826 " has repeat elements in" +
3827 " set " + onosSetName + ":\n" +
3828 str( getResponses[ i ] ) )
3829 getResults = main.FALSE
3830 elif getResponses[ i ] == main.ERROR:
3831 getResults = main.FALSE
3832 sizeResponses = []
3833 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003834 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003835 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003836 name="setTestSize-" + str( i ),
3837 args=[ onosSetName ] )
3838 threads.append( t )
3839 t.start()
3840 for t in threads:
3841 t.join()
3842 sizeResponses.append( t.result )
3843 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003844 for i in range( len( main.activeNodes ) ):
3845 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003846 if size != sizeResponses[ i ]:
3847 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003848 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003849 " expected a size of " + str( size ) +
3850 " for set " + onosSetName +
3851 " but got " + str( sizeResponses[ i ] ) )
3852 removeAllResults = removeAllResults and getResults and sizeResults
3853 utilities.assert_equals( expect=main.TRUE,
3854 actual=removeAllResults,
3855 onpass="Set removeAll correct",
3856 onfail="Set removeAll was incorrect" )
3857
3858 main.step( "Distributed Set addAll()" )
3859 onosSet.update( addAllValue.split() )
3860 addResponses = []
3861 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003862 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003863 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003864 name="setTestAddAll-" + str( i ),
3865 args=[ onosSetName, addAllValue ] )
3866 threads.append( t )
3867 t.start()
3868 for t in threads:
3869 t.join()
3870 addResponses.append( t.result )
3871
3872 # main.TRUE = successfully changed the set
3873 # main.FALSE = action resulted in no change in set
3874 # main.ERROR - Some error in executing the function
3875 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003876 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003877 if addResponses[ i ] == main.TRUE:
3878 # All is well
3879 pass
3880 elif addResponses[ i ] == main.FALSE:
3881 # Already in set, probably fine
3882 pass
3883 elif addResponses[ i ] == main.ERROR:
3884 # Error in execution
3885 addAllResults = main.FALSE
3886 else:
3887 # unexpected result
3888 addAllResults = main.FALSE
3889 if addAllResults != main.TRUE:
3890 main.log.error( "Error executing set addAll" )
3891
3892 # Check if set is still correct
3893 size = len( onosSet )
3894 getResponses = []
3895 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003896 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003897 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003898 name="setTestGet-" + str( i ),
3899 args=[ onosSetName ] )
3900 threads.append( t )
3901 t.start()
3902 for t in threads:
3903 t.join()
3904 getResponses.append( t.result )
3905 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003906 for i in range( len( main.activeNodes ) ):
3907 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003908 if isinstance( getResponses[ i ], list):
3909 current = set( getResponses[ i ] )
3910 if len( current ) == len( getResponses[ i ] ):
3911 # no repeats
3912 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003913 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003914 " has incorrect view" +
3915 " of set " + onosSetName + ":\n" +
3916 str( getResponses[ i ] ) )
3917 main.log.debug( "Expected: " + str( onosSet ) )
3918 main.log.debug( "Actual: " + str( current ) )
3919 getResults = main.FALSE
3920 else:
3921 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003922 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003923 " has repeat elements in" +
3924 " set " + onosSetName + ":\n" +
3925 str( getResponses[ i ] ) )
3926 getResults = main.FALSE
3927 elif getResponses[ i ] == main.ERROR:
3928 getResults = main.FALSE
3929 sizeResponses = []
3930 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003931 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003932 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003933 name="setTestSize-" + str( i ),
3934 args=[ onosSetName ] )
3935 threads.append( t )
3936 t.start()
3937 for t in threads:
3938 t.join()
3939 sizeResponses.append( t.result )
3940 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003941 for i in range( len( main.activeNodes ) ):
3942 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003943 if size != sizeResponses[ i ]:
3944 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003945 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003946 " expected a size of " + str( size ) +
3947 " for set " + onosSetName +
3948 " but got " + str( sizeResponses[ i ] ) )
3949 addAllResults = addAllResults and getResults and sizeResults
3950 utilities.assert_equals( expect=main.TRUE,
3951 actual=addAllResults,
3952 onpass="Set addAll correct",
3953 onfail="Set addAll was incorrect" )
3954
3955 main.step( "Distributed Set clear()" )
3956 onosSet.clear()
3957 clearResponses = []
3958 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003959 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003960 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003961 name="setTestClear-" + str( i ),
3962 args=[ onosSetName, " "], # value doesn't matter
3963 kwargs={ "clear": True } )
3964 threads.append( t )
3965 t.start()
3966 for t in threads:
3967 t.join()
3968 clearResponses.append( t.result )
3969
3970 # main.TRUE = successfully changed the set
3971 # main.FALSE = action resulted in no change in set
3972 # main.ERROR - Some error in executing the function
3973 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003974 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003975 if clearResponses[ i ] == main.TRUE:
3976 # All is well
3977 pass
3978 elif clearResponses[ i ] == main.FALSE:
3979 # Nothing set, probably fine
3980 pass
3981 elif clearResponses[ i ] == main.ERROR:
3982 # Error in execution
3983 clearResults = main.FALSE
3984 else:
3985 # unexpected result
3986 clearResults = main.FALSE
3987 if clearResults != main.TRUE:
3988 main.log.error( "Error executing set clear" )
3989
3990 # Check if set is still correct
3991 size = len( onosSet )
3992 getResponses = []
3993 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003994 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003995 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003996 name="setTestGet-" + str( i ),
3997 args=[ onosSetName ] )
3998 threads.append( t )
3999 t.start()
4000 for t in threads:
4001 t.join()
4002 getResponses.append( t.result )
4003 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004004 for i in range( len( main.activeNodes ) ):
4005 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004006 if isinstance( getResponses[ i ], list):
4007 current = set( getResponses[ i ] )
4008 if len( current ) == len( getResponses[ i ] ):
4009 # no repeats
4010 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004011 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004012 " has incorrect view" +
4013 " of set " + onosSetName + ":\n" +
4014 str( getResponses[ i ] ) )
4015 main.log.debug( "Expected: " + str( onosSet ) )
4016 main.log.debug( "Actual: " + str( current ) )
4017 getResults = main.FALSE
4018 else:
4019 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004020 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004021 " has repeat elements in" +
4022 " set " + onosSetName + ":\n" +
4023 str( getResponses[ i ] ) )
4024 getResults = main.FALSE
4025 elif getResponses[ i ] == main.ERROR:
4026 getResults = main.FALSE
4027 sizeResponses = []
4028 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004029 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004030 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004031 name="setTestSize-" + str( i ),
4032 args=[ onosSetName ] )
4033 threads.append( t )
4034 t.start()
4035 for t in threads:
4036 t.join()
4037 sizeResponses.append( t.result )
4038 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004039 for i in range( len( main.activeNodes ) ):
4040 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004041 if size != sizeResponses[ i ]:
4042 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004043 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004044 " expected a size of " + str( size ) +
4045 " for set " + onosSetName +
4046 " but got " + str( sizeResponses[ i ] ) )
4047 clearResults = clearResults and getResults and sizeResults
4048 utilities.assert_equals( expect=main.TRUE,
4049 actual=clearResults,
4050 onpass="Set clear correct",
4051 onfail="Set clear was incorrect" )
4052
4053 main.step( "Distributed Set addAll()" )
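        # addAll: update the local reference set with the space-separated
        # values in addAllValue, then have every active node issue a single
        # setTestAdd of the whole string in parallel threads.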
4054 onosSet.update( addAllValue.split() )
4055 addResponses = []
4056 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004057 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004058 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004059 name="setTestAddAll-" + str( i ),
4060 args=[ onosSetName, addAllValue ] )
4061 threads.append( t )
4062 t.start()
4063 for t in threads:
4064 t.join()
4065 addResponses.append( t.result )
4066
4067 # main.TRUE = successfully changed the set
4068 # main.FALSE = action resulted in no change in set
4069 # main.ERROR - Some error in executing the function
4070 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004071 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004072 if addResponses[ i ] == main.TRUE:
4073 # All is well
4074 pass
4075 elif addResponses[ i ] == main.FALSE:
4076 # Already in set, probably fine
4077 pass
4078 elif addResponses[ i ] == main.ERROR:
4079 # Error in execution
4080 addAllResults = main.FALSE
4081 else:
4082 # unexpected result
4083 addAllResults = main.FALSE
4084 if addAllResults != main.TRUE:
4085 main.log.error( "Error executing set addAll" )
4086
4087 # Check if set is still correct
4088 size = len( onosSet )
4089 getResponses = []
4090 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004091 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004092 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004093 name="setTestGet-" + str( i ),
4094 args=[ onosSetName ] )
4095 threads.append( t )
4096 t.start()
4097 for t in threads:
4098 t.join()
4099 getResponses.append( t.result )
4100 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004101 for i in range( len( main.activeNodes ) ):
4102 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004103             if isinstance( getResponses[ i ], list ):
4104 current = set( getResponses[ i ] )
4105 if len( current ) == len( getResponses[ i ] ):
4106 # no repeats
4107 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004108 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004109 " has incorrect view" +
4110 " of set " + onosSetName + ":\n" +
4111 str( getResponses[ i ] ) )
4112 main.log.debug( "Expected: " + str( onosSet ) )
4113 main.log.debug( "Actual: " + str( current ) )
4114 getResults = main.FALSE
4115 else:
4116 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004117 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004118 " has repeat elements in" +
4119 " set " + onosSetName + ":\n" +
4120 str( getResponses[ i ] ) )
4121 getResults = main.FALSE
4122 elif getResponses[ i ] == main.ERROR:
4123 getResults = main.FALSE
4124 sizeResponses = []
4125 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004126 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004127 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004128 name="setTestSize-" + str( i ),
4129 args=[ onosSetName ] )
4130 threads.append( t )
4131 t.start()
4132 for t in threads:
4133 t.join()
4134 sizeResponses.append( t.result )
4135 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004136 for i in range( len( main.activeNodes ) ):
4137 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004138 if size != sizeResponses[ i ]:
4139 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004140 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004141 " expected a size of " + str( size ) +
4142 " for set " + onosSetName +
4143 " but got " + str( sizeResponses[ i ] ) )
4144 addAllResults = addAllResults and getResults and sizeResults
4145 utilities.assert_equals( expect=main.TRUE,
4146 actual=addAllResults,
4147 onpass="Set addAll correct",
4148 onfail="Set addAll was incorrect" )
4149
4150 main.step( "Distributed Set retain()" )
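        # retain keeps only the elements listed in retainValue, i.e. a set
        # intersection.  The local reference set uses intersection_update()
        # while each active node runs setTestRemove with retain=True in its
        # own thread.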
4151 onosSet.intersection_update( retainValue.split() )
4152 retainResponses = []
4153 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004154 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004155 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004156 name="setTestRetain-" + str( i ),
4157 args=[ onosSetName, retainValue ],
4158 kwargs={ "retain": True } )
4159 threads.append( t )
4160 t.start()
4161 for t in threads:
4162 t.join()
4163 retainResponses.append( t.result )
4164
4165 # main.TRUE = successfully changed the set
4166 # main.FALSE = action resulted in no change in set
4167 # main.ERROR - Some error in executing the function
4168 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004169 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004170 if retainResponses[ i ] == main.TRUE:
4171 # All is well
4172 pass
4173 elif retainResponses[ i ] == main.FALSE:
4174 # Already in set, probably fine
4175                 # Retain made no change to the set, probably fine
4176 elif retainResponses[ i ] == main.ERROR:
4177 # Error in execution
4178 retainResults = main.FALSE
4179 else:
4180 # unexpected result
4181 retainResults = main.FALSE
4182 if retainResults != main.TRUE:
4183 main.log.error( "Error executing set retain" )
4184
4185 # Check if set is still correct
4186 size = len( onosSet )
4187 getResponses = []
4188 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004189 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004190 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004191 name="setTestGet-" + str( i ),
4192 args=[ onosSetName ] )
4193 threads.append( t )
4194 t.start()
4195 for t in threads:
4196 t.join()
4197 getResponses.append( t.result )
4198 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004199 for i in range( len( main.activeNodes ) ):
4200 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004201             if isinstance( getResponses[ i ], list ):
4202 current = set( getResponses[ i ] )
4203 if len( current ) == len( getResponses[ i ] ):
4204 # no repeats
4205 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004206 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004207 " has incorrect view" +
4208 " of set " + onosSetName + ":\n" +
4209 str( getResponses[ i ] ) )
4210 main.log.debug( "Expected: " + str( onosSet ) )
4211 main.log.debug( "Actual: " + str( current ) )
4212 getResults = main.FALSE
4213 else:
4214 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004215 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004216 " has repeat elements in" +
4217 " set " + onosSetName + ":\n" +
4218 str( getResponses[ i ] ) )
4219 getResults = main.FALSE
4220 elif getResponses[ i ] == main.ERROR:
4221 getResults = main.FALSE
4222 sizeResponses = []
4223 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004224 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004225 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004226 name="setTestSize-" + str( i ),
4227 args=[ onosSetName ] )
4228 threads.append( t )
4229 t.start()
4230 for t in threads:
4231 t.join()
4232 sizeResponses.append( t.result )
4233 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004234 for i in range( len( main.activeNodes ) ):
4235 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004236 if size != sizeResponses[ i ]:
4237 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004238 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004239 str( size ) + " for set " + onosSetName +
4240 " but got " + str( sizeResponses[ i ] ) )
4241 retainResults = retainResults and getResults and sizeResults
4242 utilities.assert_equals( expect=main.TRUE,
4243 actual=retainResults,
4244 onpass="Set retain correct",
4245 onfail="Set retain was incorrect" )
4246
Jon Hall2a5002c2015-08-21 16:49:11 -07004247 # Transactional maps
4248 main.step( "Partitioned Transactional maps put" )
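        # Write numKeys entries into the partitioned transactional map from a
        # single active node; the same "Key<n>" keys are read back from every
        # node in the next step.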
4249 tMapValue = "Testing"
4250 numKeys = 100
4251 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004252 node = main.activeNodes[0]
4253 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall6e709752016-02-01 13:38:46 -08004254         if putResponses and len( putResponses ) == numKeys:
Jon Hall2a5002c2015-08-21 16:49:11 -07004255 for i in putResponses:
4256 if putResponses[ i ][ 'value' ] != tMapValue:
4257 putResult = False
4258 else:
4259 putResult = False
4260 if not putResult:
4261 main.log.debug( "Put response values: " + str( putResponses ) )
4262 utilities.assert_equals( expect=True,
4263 actual=putResult,
4264 onpass="Partitioned Transactional Map put successful",
4265 onfail="Partitioned Transactional Map put values are incorrect" )
4266
4267 main.step( "Partitioned Transactional maps get" )
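        # For each key written above, read it back from every active node in
        # parallel and confirm that all nodes return the value that was put.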
4268 getCheck = True
4269 for n in range( 1, numKeys + 1 ):
4270 getResponses = []
4271 threads = []
4272 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004273 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004274 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4275 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004276 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004277 threads.append( t )
4278 t.start()
4279 for t in threads:
4280 t.join()
4281 getResponses.append( t.result )
4282 for node in getResponses:
4283 if node != tMapValue:
4284 valueCheck = False
4285 if not valueCheck:
4286 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4287 main.log.warn( getResponses )
4288 getCheck = getCheck and valueCheck
4289 utilities.assert_equals( expect=True,
4290 actual=getCheck,
4291 onpass="Partitioned Transactional Map get values were correct",
4292                                 onfail="Partitioned Transactional Map get values were incorrect" )