Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The failure-inducing case.
13CASE62: The failure-recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: Start election app on all ONOS nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAkillNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hallf3d16e72015-12-16 17:45:08 -080052 import time
Jon Hall3b489db2015-10-05 14:38:37 -070053 import pexpect
Jon Hall5cf14d52015-07-16 12:15:19 -070054 main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
55 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Set up the test environment, including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS " +\
59 "cli sessions."
60 # TODO: save all the timers and output them for plotting
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
81
82 # FIXME: just get controller port from params?
83 # TODO: do we really need all these?
84 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
85 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
86 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
87 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
88 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
89 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
90 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
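# NOTE (illustrative only): the main.params lookups in this case assume a
# .params file for this test with entries roughly like the following; the
# values shown here are examples, not taken from the real file:
# <PARAMS>
#     <Git>False</Git>
#     <branch>master</branch>
#     <num_controllers>7</num_controllers>
#     <ENV><cellName>HA</cellName><appString>drivers,openflow</appString></ENV>
#     <CTRL><port1>6653</port1> ... <port7>6653</port7></CTRL>
# </PARAMS>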
91
Jon Halle1a3b752015-07-22 13:02:46 -070092 try:
93 fileName = "Counters"
94 # TODO: Maybe make a library folder somewhere?
95 path = main.params[ 'imports' ][ 'path' ]
96 main.Counters = imp.load_source( fileName,
97 path + fileName + ".py" )
98 except Exception as e:
99 main.log.exception( e )
100 main.cleanup()
101 main.exit()
102
103 main.CLIs = []
104 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700105 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700106 for i in range( 1, main.numCtrls + 1 ):
107 try:
108 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
109 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
110 ipList.append( main.nodes[ -1 ].ip_address )
111 except AttributeError:
112 break
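# The ONOScli1..N and ONOS1..N components referenced here are expected to be
# defined in this test's .topo file; the loop simply collects their handles
# until the first missing component raises AttributeError.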
Jon Hall5cf14d52015-07-16 12:15:19 -0700113
114 main.step( "Create cell file" )
115 cellAppString = main.params[ 'ENV' ][ 'appString' ]
116 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
117 main.Mininet1.ip_address,
118 cellAppString, ipList )
119 main.step( "Applying cell variable to environment" )
120 cellResult = main.ONOSbench.setCell( cellName )
121 verifyResult = main.ONOSbench.verifyCell()
122
123 # FIXME: this is a short-term fix
124 main.log.info( "Removing raft logs" )
125 main.ONOSbench.onosRemoveRaftLogs()
126
127 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700128 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700129 main.ONOSbench.onosUninstall( node.ip_address )
130
131 # Make sure ONOS is DEAD
132 main.log.info( "Killing any ONOS processes" )
133 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700134 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700135 killed = main.ONOSbench.onosKill( node.ip_address )
136 killResults = killResults and killed
137
138 cleanInstallResult = main.TRUE
139 gitPullResult = main.TRUE
140
141 main.step( "Starting Mininet" )
142 # scp topo file to mininet
143 # TODO: move to params?
144 topoName = "obelisk.py"
145 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700146 main.ONOSbench.scp( main.Mininet1,
147 filePath + topoName,
148 main.Mininet1.home,
149 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700150 mnResult = main.Mininet1.startNet( )
151 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
152 onpass="Mininet Started",
153 onfail="Error starting Mininet" )
154
155 main.step( "Git checkout and pull " + gitBranch )
156 if PULLCODE:
157 main.ONOSbench.gitCheckout( gitBranch )
158 gitPullResult = main.ONOSbench.gitPull()
159 # values of 1 or 3 are good
160 utilities.assert_lesser( expect=0, actual=gitPullResult,
161 onpass="Git pull successful",
162 onfail="Git pull failed" )
163 main.ONOSbench.getVersion( report=True )
164
165 main.step( "Using mvn clean install" )
166 cleanInstallResult = main.TRUE
167 if PULLCODE and gitPullResult == main.TRUE:
168 cleanInstallResult = main.ONOSbench.cleanInstall()
169 else:
170 main.log.warn( "Did not pull new code so skipping mvn " +
171 "clean install" )
172 utilities.assert_equals( expect=main.TRUE,
173 actual=cleanInstallResult,
174 onpass="MCI successful",
175 onfail="MCI failed" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700176
177 main.step( "Make sure ONOS service doesn't automatically respawn" )
178 handle = main.ONOSbench.handle
179 handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
180 handle.expect( "\$" ) # $ from the command
181 handle.expect( "\$" ) # $ from the prompt
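# The sed above comments out the "respawn" line in tools/package/init/onos.conf
# so the ONOS service is not automatically restarted after being killed; nodes
# taken down by the failure case (CASE61) then stay down until recovery.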
182
Jon Hall5cf14d52015-07-16 12:15:19 -0700183 # GRAPHS
184 # NOTE: important params here:
185 # job = name of Jenkins job
186 # Plot Name = Plot-HA, only can be used if multiple plots
187 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700188 job = "HAkillNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700189 plotName = "Plot-HA"
Jon Hallff566d52016-01-15 14:45:36 -0800190 index = "1"
Jon Hall5cf14d52015-07-16 12:15:19 -0700191 graphs = '<ac:structured-macro ac:name="html">\n'
192 graphs += '<ac:plain-text-body><![CDATA[\n'
193 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800194 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700195 '&width=500&height=300"' +\
196 'noborder="0" width="500" height="300" scrolling="yes" ' +\
197 'seamless="seamless"></iframe>\n'
198 graphs += ']]></ac:plain-text-body>\n'
199 graphs += '</ac:structured-macro>\n'
200 main.log.wiki(graphs)
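# The HTML block above embeds the Jenkins plot for this job as an iframe in the
# wiki portion of the TestON report (written via main.log.wiki), so result
# trends can be viewed alongside the test summary.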
201
202 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700203 # copy gen-partitions file to ONOS
204 # NOTE: this assumes TestON and ONOS are on the same machine
205 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
206 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
207 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
208 main.ONOSbench.ip_address,
209 srcFile,
210 dstDir,
211 pwd=main.ONOSbench.pwd,
212 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700213 packageResult = main.ONOSbench.onosPackage()
214 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
215 onpass="ONOS package successful",
216 onfail="ONOS package failed" )
217
218 main.step( "Installing ONOS package" )
219 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700220 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700221 tmpResult = main.ONOSbench.onosInstall( options="-f",
222 node=node.ip_address )
223 onosInstallResult = onosInstallResult and tmpResult
224 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
225 onpass="ONOS install successful",
226 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700227 # clean up gen-partitions file
228 try:
229 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
230 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
231 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
232 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
233 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
234 str( main.ONOSbench.handle.before ) )
235 except ( pexpect.TIMEOUT, pexpect.EOF ):
236 main.log.exception( "ONOSbench: pexpect exception found:" +
237 main.ONOSbench.handle.before )
238 main.cleanup()
239 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700240
241 main.step( "Checking if ONOS is up yet" )
242 for i in range( 2 ):
243 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700244 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700245 started = main.ONOSbench.isup( node.ip_address )
246 if not started:
247 main.log.error( node.name + " didn't start!" )
248 main.ONOSbench.onosStop( node.ip_address )
249 main.ONOSbench.onosStart( node.ip_address )
250 onosIsupResult = onosIsupResult and started
251 if onosIsupResult == main.TRUE:
252 break
253 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
254 onpass="ONOS startup successful",
255 onfail="ONOS startup failed" )
256
257 main.step( "Starting ONOS CLI sessions" )
258 cliResults = main.TRUE
259 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700260 for i in range( main.numCtrls ):
261 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700262 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700263 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700264 threads.append( t )
265 t.start()
266
267 for t in threads:
268 t.join()
269 cliResults = cliResults and t.result
270 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
271 onpass="ONOS cli startup successful",
272 onfail="ONOS cli startup failed" )
273
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700274 # Create a list of active nodes for use when some nodes are stopped
275 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
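# main.activeNodes holds indices into main.CLIs / main.nodes; later cases
# iterate over it instead of the full node list so that CLI queries only go to
# controllers that are still expected to be up after the kill case.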
276
Jon Hall5cf14d52015-07-16 12:15:19 -0700277 if main.params[ 'tcpdump' ].lower() == "true":
278 main.step( "Start Packet Capture MN" )
279 main.Mininet2.startTcpdump(
280 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
281 + "-MN.pcap",
282 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
283 port=main.params[ 'MNtcpdump' ][ 'port' ] )
284
285 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800286 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700287 appCheck = main.TRUE
288 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700289 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700290 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700291 name="appToIDCheck-" + str( i ),
292 args=[] )
293 threads.append( t )
294 t.start()
295
296 for t in threads:
297 t.join()
298 appCheck = appCheck and t.result
299 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700300 node = main.activeNodes[0]
301 main.log.warn( main.CLIs[node].apps() )
302 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700303 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
304 onpass="App Ids seem to be correct",
305 onfail="Something is wrong with app Ids" )
306
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700307 main.step( "Clean up ONOS service changes" )
308 handle.sendline( "git checkout -- tools/package/init/onos.conf" )
309 handle.expect( "\$" )
310
Jon Hall5cf14d52015-07-16 12:15:19 -0700311 if cliResults == main.FALSE:
312 main.log.error( "Failed to start ONOS, stopping test" )
313 main.cleanup()
314 main.exit()
315
316 def CASE2( self, main ):
317 """
318 Assign devices to controllers
319 """
320 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700321 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700322 assert main, "main not defined"
323 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700324 assert main.CLIs, "main.CLIs not defined"
325 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700326 assert ONOS1Port, "ONOS1Port not defined"
327 assert ONOS2Port, "ONOS2Port not defined"
328 assert ONOS3Port, "ONOS3Port not defined"
329 assert ONOS4Port, "ONOS4Port not defined"
330 assert ONOS5Port, "ONOS5Port not defined"
331 assert ONOS6Port, "ONOS6Port not defined"
332 assert ONOS7Port, "ONOS7Port not defined"
333
334 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700335 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700336 "and check that an ONOS node becomes the " +\
337 "master of the device."
338 main.step( "Assign switches to controllers" )
339
340 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700341 for i in range( main.numCtrls ):
342 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700343 swList = []
344 for i in range( 1, 29 ):
345 swList.append( "s" + str( i ) )
346 main.Mininet1.assignSwController( sw=swList, ip=ipList )
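# assignSwController points every switch (s1-s28) at the full list of
# controller IPs; the loop below then checks the getSwController (ovs-vsctl)
# output for a "tcp:<ip>" entry per controller to confirm the connections were
# actually configured.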
347
348 mastershipCheck = main.TRUE
349 for i in range( 1, 29 ):
350 response = main.Mininet1.getSwController( "s" + str( i ) )
351 try:
352 main.log.info( str( response ) )
353 except Exception:
354 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700355 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700356 if re.search( "tcp:" + node.ip_address, response ):
357 mastershipCheck = mastershipCheck and main.TRUE
358 else:
359 main.log.error( "Error, node " + node.ip_address + " is " +
360 "not in the list of controllers s" +
361 str( i ) + " is connecting to." )
362 mastershipCheck = main.FALSE
363 utilities.assert_equals(
364 expect=main.TRUE,
365 actual=mastershipCheck,
366 onpass="Switch mastership assigned correctly",
367 onfail="Switches not assigned correctly to controllers" )
368
369 def CASE21( self, main ):
370 """
371 Assign mastership to controllers
372 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700373 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700374 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700375 assert main, "main not defined"
376 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700377 assert main.CLIs, "main.CLIs not defined"
378 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700379 assert ONOS1Port, "ONOS1Port not defined"
380 assert ONOS2Port, "ONOS2Port not defined"
381 assert ONOS3Port, "ONOS3Port not defined"
382 assert ONOS4Port, "ONOS4Port not defined"
383 assert ONOS5Port, "ONOS5Port not defined"
384 assert ONOS6Port, "ONOS6Port not defined"
385 assert ONOS7Port, "ONOS7Port not defined"
386
387 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700388 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700389 "device. Then manually assign" +\
390 " mastership to specific ONOS nodes using" +\
391 " 'device-role'"
392 main.step( "Assign mastership of switches to specific controllers" )
393 # Manually assign mastership to the controller we want
394 roleCall = main.TRUE
395
396 ipList = [ ]
397 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700398 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700399 try:
400 # Assign mastership to specific controllers. This assignment was
401 # determined for a 7-node cluster, but will work with any sized
402 # cluster
403 for i in range( 1, 29 ): # switches 1 through 28
404 # set up correct variables:
405 if i == 1:
406 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700407 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700408 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700409 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700410 c = 1 % main.numCtrls
411 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700412 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700413 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700414 c = 1 % main.numCtrls
415 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700416 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700417 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700418 c = 3 % main.numCtrls
419 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700420 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700421 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700422 c = 2 % main.numCtrls
423 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700424 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700425 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700426 c = 2 % main.numCtrls
427 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700428 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700429 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700430 c = 5 % main.numCtrls
431 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700432 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700433 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700434 c = 4 % main.numCtrls
435 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700436 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700437 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700438 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700439 c = 6 % main.numCtrls
440 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700441 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700442 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700443 elif i == 28:
444 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700445 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700446 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700447 else:
448 main.log.error( "You didn't write an else statement for " +
449 "switch s" + str( i ) )
450 roleCall = main.FALSE
451 # Assign switch
452 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
453 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700454 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700455 ipList.append( ip )
456 deviceList.append( deviceId )
457 except ( AttributeError, AssertionError ):
458 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700459 main.log.info( onosCli.devices() )
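# Mapping used above (for a full 7-node cell): s1 and s28 go to ONOS1, s2-s3
# to ONOS2, s5-s6 to ONOS3, s4 to ONOS4, s8-s17 to ONOS5, s7 to ONOS6, and
# s18-s27 to ONOS7. With fewer controllers the "% main.numCtrls" wraps the
# index, e.g. on a 3-node cluster s8-s17 use c = 4 % 3 = 1, i.e. ONOS2.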
Jon Hall5cf14d52015-07-16 12:15:19 -0700460 utilities.assert_equals(
461 expect=main.TRUE,
462 actual=roleCall,
463 onpass="Re-assigned switch mastership to designated controller",
464 onfail="Something wrong with deviceRole calls" )
465
466 main.step( "Check mastership was correctly assigned" )
467 roleCheck = main.TRUE
468 # NOTE: This is due to the fact that device mastership change is not
469 # atomic and is actually a multi step process
470 time.sleep( 5 )
471 for i in range( len( ipList ) ):
472 ip = ipList[i]
473 deviceId = deviceList[i]
474 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700475 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700476 if ip in master:
477 roleCheck = roleCheck and main.TRUE
478 else:
479 roleCheck = roleCheck and main.FALSE
480 main.log.error( "Error, controller " + ip + " is not" +
481 " master " + "of device " +
482 str( deviceId ) + ". Master is " +
483 repr( master ) + "." )
484 utilities.assert_equals(
485 expect=main.TRUE,
486 actual=roleCheck,
487 onpass="Switches were successfully reassigned to designated " +
488 "controller",
489 onfail="Switches were not successfully reassigned" )
490
491 def CASE3( self, main ):
492 """
493 Assign intents
494 """
495 import time
496 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700497 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700498 assert main, "main not defined"
499 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700500 assert main.CLIs, "main.CLIs not defined"
501 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700502 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700503 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700504 "assign predetermined host-to-host intents." +\
505 " After installation, check that the intent" +\
506 " is distributed to all nodes and the state" +\
507 " is INSTALLED"
508
509 # install onos-app-fwd
510 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700511 onosCli = main.CLIs[ main.activeNodes[0] ]
512 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700513 utilities.assert_equals( expect=main.TRUE, actual=installResults,
514 onpass="Install fwd successful",
515 onfail="Install fwd failed" )
516
517 main.step( "Check app ids" )
518 appCheck = main.TRUE
519 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700520 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700521 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700522 name="appToIDCheck-" + str( i ),
523 args=[] )
524 threads.append( t )
525 t.start()
526
527 for t in threads:
528 t.join()
529 appCheck = appCheck and t.result
530 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700531 main.log.warn( onosCli.apps() )
532 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700533 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
534 onpass="App Ids seem to be correct",
535 onfail="Something is wrong with app Ids" )
536
537 main.step( "Discovering Hosts( Via pingall for now )" )
538 # FIXME: Once we have a host discovery mechanism, use that instead
539 # REACTIVE FWD test
540 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700541 passMsg = "Reactive Pingall test passed"
542 time1 = time.time()
543 pingResult = main.Mininet1.pingall()
544 time2 = time.time()
545 if not pingResult:
546 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700547 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700548 passMsg += " on the second try"
549 utilities.assert_equals(
550 expect=main.TRUE,
551 actual=pingResult,
552 onpass= passMsg,
553 onfail="Reactive Pingall failed, " +
554 "one or more ping pairs failed" )
555 main.log.info( "Time for pingall: %2f seconds" %
556 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700557 # timeout for fwd flows
558 time.sleep( 11 )
559 # uninstall onos-app-fwd
560 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700561 node = main.activeNodes[0]
562 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700563 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
564 onpass="Uninstall fwd successful",
565 onfail="Uninstall fwd failed" )
566
567 main.step( "Check app ids" )
568 threads = []
569 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700570 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700571 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700572 name="appToIDCheck-" + str( i ),
573 args=[] )
574 threads.append( t )
575 t.start()
576
577 for t in threads:
578 t.join()
579 appCheck2 = appCheck2 and t.result
580 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700581 node = main.activeNodes[0]
582 main.log.warn( main.CLIs[node].apps() )
583 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700584 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
585 onpass="App Ids seem to be correct",
586 onfail="Something is wrong with app Ids" )
587
588 main.step( "Add host intents via cli" )
589 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700590 # TODO: move the host numbers to params
591 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700592 intentAddResult = True
593 hostResult = main.TRUE
594 for i in range( 8, 18 ):
595 main.log.info( "Adding host intent between h" + str( i ) +
596 " and h" + str( i + 10 ) )
597 host1 = "00:00:00:00:00:" + \
598 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
599 host2 = "00:00:00:00:00:" + \
600 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
601 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700602 host1Dict = onosCli.getHost( host1 )
603 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700604 host1Id = None
605 host2Id = None
606 if host1Dict and host2Dict:
607 host1Id = host1Dict.get( 'id', None )
608 host2Id = host2Dict.get( 'id', None )
609 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700610 nodeNum = ( i % len( main.activeNodes ) )
611 node = main.activeNodes[nodeNum]
612 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700613 if tmpId:
614 main.log.info( "Added intent with id: " + tmpId )
615 intentIds.append( tmpId )
616 else:
617 main.log.error( "addHostIntent returned: " +
618 repr( tmpId ) )
619 else:
620 main.log.error( "Error, getHost() failed for h" + str( i ) +
621 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700622 node = main.activeNodes[0]
623 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700624 main.log.warn( "Hosts output: " )
625 try:
626 main.log.warn( json.dumps( json.loads( hosts ),
627 sort_keys=True,
628 indent=4,
629 separators=( ',', ': ' ) ) )
630 except ( ValueError, TypeError ):
631 main.log.warn( repr( hosts ) )
632 hostResult = main.FALSE
633 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
634 onpass="Found a host id for each host",
635 onfail="Error looking up host ids" )
636
637 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700638 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700639 main.log.info( "Submitted intents: " + str( intentIds ) )
640 main.log.info( "Intents in ONOS: " + str( onosIds ) )
641 for intent in intentIds:
642 if intent in onosIds:
643 pass # intent submitted is in onos
644 else:
645 intentAddResult = False
646 if intentAddResult:
647 intentStop = time.time()
648 else:
649 intentStop = None
650 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700651 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700652 intentStates = []
653 installedCheck = True
654 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
655 count = 0
656 try:
657 for intent in json.loads( intents ):
658 state = intent.get( 'state', None )
659 if "INSTALLED" not in state:
660 installedCheck = False
661 intentId = intent.get( 'id', None )
662 intentStates.append( ( intentId, state ) )
663 except ( ValueError, TypeError ):
664 main.log.exception( "Error parsing intents" )
665 # add submitted intents not in the store
666 tmplist = [ i for i, s in intentStates ]
667 missingIntents = False
668 for i in intentIds:
669 if i not in tmplist:
670 intentStates.append( ( i, " - " ) )
671 missingIntents = True
672 intentStates.sort()
673 for i, s in intentStates:
674 count += 1
675 main.log.info( "%-6s%-15s%-15s" %
676 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700677 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700678 try:
679 missing = False
680 if leaders:
681 parsedLeaders = json.loads( leaders )
682 main.log.warn( json.dumps( parsedLeaders,
683 sort_keys=True,
684 indent=4,
685 separators=( ',', ': ' ) ) )
686 # check for all intent partitions
687 topics = []
688 for i in range( 14 ):
689 topics.append( "intent-partition-" + str( i ) )
690 main.log.debug( topics )
691 ONOStopics = [ j['topic'] for j in parsedLeaders ]
692 for topic in topics:
693 if topic not in ONOStopics:
694 main.log.error( "Error: " + topic +
695 " not in leaders" )
696 missing = True
697 else:
698 main.log.error( "leaders() returned None" )
699 except ( ValueError, TypeError ):
700 main.log.exception( "Error parsing leaders" )
701 main.log.error( repr( leaders ) )
702 # Check all nodes
703 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700704 for i in main.activeNodes:
705 response = main.CLIs[i].leaders( jsonFormat=False)
706 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700707 str( response ) )
708
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700709 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700710 try:
711 if partitions :
712 parsedPartitions = json.loads( partitions )
713 main.log.warn( json.dumps( parsedPartitions,
714 sort_keys=True,
715 indent=4,
716 separators=( ',', ': ' ) ) )
717 # TODO check for a leader in all partitions
718 # TODO check for consistency among nodes
719 else:
720 main.log.error( "partitions() returned None" )
721 except ( ValueError, TypeError ):
722 main.log.exception( "Error parsing partitions" )
723 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700724 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700725 try:
726 if pendingMap :
727 parsedPending = json.loads( pendingMap )
728 main.log.warn( json.dumps( parsedPending,
729 sort_keys=True,
730 indent=4,
731 separators=( ',', ': ' ) ) )
732 # TODO check something here?
733 else:
734 main.log.error( "pendingMap() returned None" )
735 except ( ValueError, TypeError ):
736 main.log.exception( "Error parsing pending map" )
737 main.log.error( repr( pendingMap ) )
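# The leaders/partitions/pendingMap dumps above are mostly diagnostic; the
# pass/fail for this step comes from the intents() output, though pendingMap
# is checked again below before deciding whether to wait and re-poll.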
738
739 intentAddResult = bool( intentAddResult and not missingIntents and
740 installedCheck )
741 if not intentAddResult:
742 main.log.error( "Error in pushing host intents to ONOS" )
743
744 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 correct = True
747 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700748 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700749 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700750 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700751 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700752 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700753 str( sorted( onosIds ) ) )
754 if sorted( ids ) != sorted( intentIds ):
755 main.log.warn( "Set of intent IDs doesn't match" )
756 correct = False
757 break
758 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700759 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700760 for intent in intents:
761 if intent[ 'state' ] != "INSTALLED":
762 main.log.warn( "Intent " + intent[ 'id' ] +
763 " is " + intent[ 'state' ] )
764 correct = False
765 break
766 if correct:
767 break
768 else:
769 time.sleep(1)
770 if not intentStop:
771 intentStop = time.time()
772 global gossipTime
773 gossipTime = intentStop - intentStart
774 main.log.info( "It took about " + str( gossipTime ) +
775 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700776 gossipPeriod = int( main.params['timers']['gossip'] )
777 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700778 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700780 onpass="ECM anti-entropy for intents worked within " +
781 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700782 onfail="Intent ECM anti-entropy took too long. " +
783 "Expected time:{}, Actual time:{}".format( maxGossipTime,
784 gossipTime ) )
785 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700786 intentAddResult = True
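# The bound above is gossip period * number of active nodes; e.g. with a 5 s
# gossip timer (the value comes from the .params <timers><gossip> entry, 5 is
# only an example) and 7 live nodes, intents must converge within 35 s.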
787
788 if not intentAddResult or "key" in pendingMap:
789 import time
790 installedCheck = True
791 main.log.info( "Sleeping 60 seconds to see if intents are found" )
792 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700793 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700794 main.log.info( "Submitted intents: " + str( intentIds ) )
795 main.log.info( "Intents in ONOS: " + str( onosIds ) )
796 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700797 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700798 intentStates = []
799 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
800 count = 0
801 try:
802 for intent in json.loads( intents ):
803 # Iter through intents of a node
804 state = intent.get( 'state', None )
805 if "INSTALLED" not in state:
806 installedCheck = False
807 intentId = intent.get( 'id', None )
808 intentStates.append( ( intentId, state ) )
809 except ( ValueError, TypeError ):
810 main.log.exception( "Error parsing intents" )
811 # add submitted intents not in the store
812 tmplist = [ i for i, s in intentStates ]
813 for i in intentIds:
814 if i not in tmplist:
815 intentStates.append( ( i, " - " ) )
816 intentStates.sort()
817 for i, s in intentStates:
818 count += 1
819 main.log.info( "%-6s%-15s%-15s" %
820 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700821 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700822 try:
823 missing = False
824 if leaders:
825 parsedLeaders = json.loads( leaders )
826 main.log.warn( json.dumps( parsedLeaders,
827 sort_keys=True,
828 indent=4,
829 separators=( ',', ': ' ) ) )
830 # check for all intent partitions
831 # check for election
832 topics = []
833 for i in range( 14 ):
834 topics.append( "intent-partition-" + str( i ) )
835 # FIXME: this should only be after we start the app
836 topics.append( "org.onosproject.election" )
837 main.log.debug( topics )
838 ONOStopics = [ j['topic'] for j in parsedLeaders ]
839 for topic in topics:
840 if topic not in ONOStopics:
841 main.log.error( "Error: " + topic +
842 " not in leaders" )
843 missing = True
844 else:
845 main.log.error( "leaders() returned None" )
846 except ( ValueError, TypeError ):
847 main.log.exception( "Error parsing leaders" )
848 main.log.error( repr( leaders ) )
849 # Check all nodes
850 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700851 for i in main.activeNodes:
852 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700853 response = node.leaders( jsonFormat=False)
854 main.log.warn( str( node.name ) + " leaders output: \n" +
855 str( response ) )
856
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700857 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700858 try:
859 if partitions :
860 parsedPartitions = json.loads( partitions )
861 main.log.warn( json.dumps( parsedPartitions,
862 sort_keys=True,
863 indent=4,
864 separators=( ',', ': ' ) ) )
865 # TODO check for a leader in all partitions
866 # TODO check for consistency among nodes
867 else:
868 main.log.error( "partitions() returned None" )
869 except ( ValueError, TypeError ):
870 main.log.exception( "Error parsing partitions" )
871 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700872 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700873 try:
874 if pendingMap :
875 parsedPending = json.loads( pendingMap )
876 main.log.warn( json.dumps( parsedPending,
877 sort_keys=True,
878 indent=4,
879 separators=( ',', ': ' ) ) )
880 # TODO check something here?
881 else:
882 main.log.error( "pendingMap() returned None" )
883 except ( ValueError, TypeError ):
884 main.log.exception( "Error parsing pending map" )
885 main.log.error( repr( pendingMap ) )
886
887 def CASE4( self, main ):
888 """
889 Ping across added host intents
890 """
891 import json
892 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700893 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700894 assert main, "main not defined"
895 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700896 assert main.CLIs, "main.CLIs not defined"
897 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700898 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700899 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700900 "functionality and check the state of " +\
901 "the intent"
902 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700903 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700904 PingResult = main.TRUE
905 for i in range( 8, 18 ):
906 ping = main.Mininet1.pingHost( src="h" + str( i ),
907 target="h" + str( i + 10 ) )
908 PingResult = PingResult and ping
909 if ping == main.FALSE:
910 main.log.warn( "Ping failed between h" + str( i ) +
911 " and h" + str( i + 10 ) )
912 elif ping == main.TRUE:
913 main.log.info( "Ping test passed!" )
914 # Don't set PingResult or you'd override failures
915 if PingResult == main.FALSE:
916 main.log.error(
917 "Intents have not been installed correctly, pings failed." )
918 # TODO: pretty print
919 main.log.warn( "ONOS1 intents: " )
920 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700921 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700922 main.log.warn( json.dumps( json.loads( tmpIntents ),
923 sort_keys=True,
924 indent=4,
925 separators=( ',', ': ' ) ) )
926 except ( ValueError, TypeError ):
927 main.log.warn( repr( tmpIntents ) )
928 utilities.assert_equals(
929 expect=main.TRUE,
930 actual=PingResult,
931 onpass="Intents have been installed correctly and pings work",
932 onfail="Intents have not been installed correctly, pings failed." )
933
934 main.step( "Check Intent state" )
935 installedCheck = False
936 loopCount = 0
937 while not installedCheck and loopCount < 40:
938 installedCheck = True
939 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700940 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700941 intentStates = []
942 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
943 count = 0
944 # Iter through intents of a node
945 try:
946 for intent in json.loads( intents ):
947 state = intent.get( 'state', None )
948 if "INSTALLED" not in state:
949 installedCheck = False
950 intentId = intent.get( 'id', None )
951 intentStates.append( ( intentId, state ) )
952 except ( ValueError, TypeError ):
953 main.log.exception( "Error parsing intents." )
954 # Print states
955 intentStates.sort()
956 for i, s in intentStates:
957 count += 1
958 main.log.info( "%-6s%-15s%-15s" %
959 ( str( count ), str( i ), str( s ) ) )
960 if not installedCheck:
961 time.sleep( 1 )
962 loopCount += 1
963 utilities.assert_equals( expect=True, actual=installedCheck,
964 onpass="Intents are all INSTALLED",
965 onfail="Intents are not all in " +
966 "INSTALLED state" )
967
968 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700969 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700970 topicCheck = main.TRUE
971 try:
972 if leaders:
973 parsedLeaders = json.loads( leaders )
974 main.log.warn( json.dumps( parsedLeaders,
975 sort_keys=True,
976 indent=4,
977 separators=( ',', ': ' ) ) )
978 # check for all intent partitions
979 # check for election
980 # TODO: Look at Devices as topics now that it uses this system
981 topics = []
982 for i in range( 14 ):
983 topics.append( "intent-partition-" + str( i ) )
984 # FIXME: this should only be after we start the app
985 # FIXME: topics.append( "org.onosproject.election" )
986 # Print leaders output
987 main.log.debug( topics )
988 ONOStopics = [ j['topic'] for j in parsedLeaders ]
989 for topic in topics:
990 if topic not in ONOStopics:
991 main.log.error( "Error: " + topic +
992 " not in leaders" )
993 topicCheck = main.FALSE
994 else:
995 main.log.error( "leaders() returned None" )
996 topicCheck = main.FALSE
997 except ( ValueError, TypeError ):
998 topicCheck = main.FALSE
999 main.log.exception( "Error parsing leaders" )
1000 main.log.error( repr( leaders ) )
1001 # TODO: Check for a leader of these topics
1002 # Check all nodes
1003 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001004 for i in main.activeNodes:
1005 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001006 response = node.leaders( jsonFormat=False)
1007 main.log.warn( str( node.name ) + " leaders output: \n" +
1008 str( response ) )
1009
1010 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1011 onpass="intent Partitions is in leaders",
1012 onfail="Some topics were lost " )
1013 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001014 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001015 try:
1016 if partitions :
1017 parsedPartitions = json.loads( partitions )
1018 main.log.warn( json.dumps( parsedPartitions,
1019 sort_keys=True,
1020 indent=4,
1021 separators=( ',', ': ' ) ) )
1022 # TODO check for a leader in all partitions
1023 # TODO check for consistency among nodes
1024 else:
1025 main.log.error( "partitions() returned None" )
1026 except ( ValueError, TypeError ):
1027 main.log.exception( "Error parsing partitions" )
1028 main.log.error( repr( partitions ) )
1029 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001030 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001031 try:
1032 if pendingMap :
1033 parsedPending = json.loads( pendingMap )
1034 main.log.warn( json.dumps( parsedPending,
1035 sort_keys=True,
1036 indent=4,
1037 separators=( ',', ': ' ) ) )
1038 # TODO check something here?
1039 else:
1040 main.log.error( "pendingMap() returned None" )
1041 except ( ValueError, TypeError ):
1042 main.log.exception( "Error parsing pending map" )
1043 main.log.error( repr( pendingMap ) )
1044
1045 if not installedCheck:
1046 main.log.info( "Waiting 60 seconds to see if the state of " +
1047 "intents change" )
1048 time.sleep( 60 )
1049 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001050 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001051 intentStates = []
1052 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1053 count = 0
1054 # Iter through intents of a node
1055 try:
1056 for intent in json.loads( intents ):
1057 state = intent.get( 'state', None )
1058 if "INSTALLED" not in state:
1059 installedCheck = False
1060 intentId = intent.get( 'id', None )
1061 intentStates.append( ( intentId, state ) )
1062 except ( ValueError, TypeError ):
1063 main.log.exception( "Error parsing intents." )
1064 intentStates.sort()
1065 for i, s in intentStates:
1066 count += 1
1067 main.log.info( "%-6s%-15s%-15s" %
1068 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001069 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001070 try:
1071 missing = False
1072 if leaders:
1073 parsedLeaders = json.loads( leaders )
1074 main.log.warn( json.dumps( parsedLeaders,
1075 sort_keys=True,
1076 indent=4,
1077 separators=( ',', ': ' ) ) )
1078 # check for all intent partitions
1079 # check for election
1080 topics = []
1081 for i in range( 14 ):
1082 topics.append( "intent-partition-" + str( i ) )
1083 # FIXME: this should only be after we start the app
1084 topics.append( "org.onosproject.election" )
1085 main.log.debug( topics )
1086 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1087 for topic in topics:
1088 if topic not in ONOStopics:
1089 main.log.error( "Error: " + topic +
1090 " not in leaders" )
1091 missing = True
1092 else:
1093 main.log.error( "leaders() returned None" )
1094 except ( ValueError, TypeError ):
1095 main.log.exception( "Error parsing leaders" )
1096 main.log.error( repr( leaders ) )
1097 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001098 for i in main.activeNodes:
1099 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001100 response = node.leaders( jsonFormat=False)
1101 main.log.warn( str( node.name ) + " leaders output: \n" +
1102 str( response ) )
1103
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001104 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001105 try:
1106 if partitions :
1107 parsedPartitions = json.loads( partitions )
1108 main.log.warn( json.dumps( parsedPartitions,
1109 sort_keys=True,
1110 indent=4,
1111 separators=( ',', ': ' ) ) )
1112 # TODO check for a leader in all partitions
1113 # TODO check for consistency among nodes
1114 else:
1115 main.log.error( "partitions() returned None" )
1116 except ( ValueError, TypeError ):
1117 main.log.exception( "Error parsing partitions" )
1118 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001119 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001120 try:
1121 if pendingMap :
1122 parsedPending = json.loads( pendingMap )
1123 main.log.warn( json.dumps( parsedPending,
1124 sort_keys=True,
1125 indent=4,
1126 separators=( ',', ': ' ) ) )
1127 # TODO check something here?
1128 else:
1129 main.log.error( "pendingMap() returned None" )
1130 except ( ValueError, TypeError ):
1131 main.log.exception( "Error parsing pending map" )
1132 main.log.error( repr( pendingMap ) )
1133 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001134 node = main.activeNodes[0]
1135 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001136 main.step( "Wait a minute then ping again" )
1137 # the wait is above
1138 PingResult = main.TRUE
1139 for i in range( 8, 18 ):
1140 ping = main.Mininet1.pingHost( src="h" + str( i ),
1141 target="h" + str( i + 10 ) )
1142 PingResult = PingResult and ping
1143 if ping == main.FALSE:
1144 main.log.warn( "Ping failed between h" + str( i ) +
1145 " and h" + str( i + 10 ) )
1146 elif ping == main.TRUE:
1147 main.log.info( "Ping test passed!" )
1148 # Don't set PingResult or you'd override failures
1149 if PingResult == main.FALSE:
1150 main.log.error(
1151 "Intents have not been installed correctly, pings failed." )
1152 # TODO: pretty print
1153 main.log.warn( "ONOS1 intents: " )
1154 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001155 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001156 main.log.warn( json.dumps( json.loads( tmpIntents ),
1157 sort_keys=True,
1158 indent=4,
1159 separators=( ',', ': ' ) ) )
1160 except ( ValueError, TypeError ):
1161 main.log.warn( repr( tmpIntents ) )
1162 utilities.assert_equals(
1163 expect=main.TRUE,
1164 actual=PingResult,
1165 onpass="Intents have been installed correctly and pings work",
1166 onfail="Intents have not been installed correctly, pings failed." )
1167
1168 def CASE5( self, main ):
1169 """
1170 Reading state of ONOS
1171 """
1172 import json
1173 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001174 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001175 assert main, "main not defined"
1176 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001177 assert main.CLIs, "main.CLIs not defined"
1178 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001179
1180 main.case( "Setting up and gathering data for current state" )
1181 # The general idea for this test case is to pull the state of
1182 # ( intents, flows, topology, ... ) from each ONOS node
1183 # We can then compare them with each other and also with past states
1184
1185 main.step( "Check that each switch has a master" )
1186 global mastershipState
1187 mastershipState = '[]'
1188
1189 # Assert that each device has a master
1190 rolesNotNull = main.TRUE
1191 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001192 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001193 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001194 name="rolesNotNull-" + str( i ),
1195 args=[] )
1196 threads.append( t )
1197 t.start()
1198
1199 for t in threads:
1200 t.join()
1201 rolesNotNull = rolesNotNull and t.result
1202 utilities.assert_equals(
1203 expect=main.TRUE,
1204 actual=rolesNotNull,
1205 onpass="Each device has a master",
1206 onfail="Some devices don't have a master assigned" )
1207
1208 main.step( "Get the Mastership of each switch from each controller" )
1209 ONOSMastership = []
1210 mastershipCheck = main.FALSE
1211 consistentMastership = True
1212 rolesResults = True
1213 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001214 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001215 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001216 name="roles-" + str( i ),
1217 args=[] )
1218 threads.append( t )
1219 t.start()
1220
1221 for t in threads:
1222 t.join()
1223 ONOSMastership.append( t.result )
1224
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001225 for i in range( len( ONOSMastership ) ):
1226 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001227 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001228 main.log.error( "Error in getting ONOS" + node + " roles" )
1229 main.log.warn( "ONOS" + node + " mastership response: " +
1230 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001231 rolesResults = False
1232 utilities.assert_equals(
1233 expect=True,
1234 actual=rolesResults,
1235 onpass="No error in reading roles output",
1236 onfail="Error in reading roles from ONOS" )
1237
1238 main.step( "Check for consistency in roles from each controller" )
1239 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1240 main.log.info(
1241 "Switch roles are consistent across all ONOS nodes" )
1242 else:
1243 consistentMastership = False
1244 utilities.assert_equals(
1245 expect=True,
1246 actual=consistentMastership,
1247 onpass="Switch roles are consistent across all ONOS nodes",
1248 onfail="ONOS nodes have different views of switch roles" )
1249
1250 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001251 for i in range( len( main.activeNodes ) ):
1252 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001253 try:
1254 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001255 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001256 json.dumps(
1257 json.loads( ONOSMastership[ i ] ),
1258 sort_keys=True,
1259 indent=4,
1260 separators=( ',', ': ' ) ) )
1261 except ( ValueError, TypeError ):
1262 main.log.warn( repr( ONOSMastership[ i ] ) )
1263 elif rolesResults and consistentMastership:
1264 mastershipCheck = main.TRUE
1265 mastershipState = ONOSMastership[ 0 ]
1266
1267 main.step( "Get the intents from each controller" )
1268 global intentState
1269 intentState = []
1270 ONOSIntents = []
1271 intentCheck = main.FALSE
1272 consistentIntents = True
1273 intentsResults = True
1274 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001275 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001276 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001277 name="intents-" + str( i ),
1278 args=[],
1279 kwargs={ 'jsonFormat': True } )
1280 threads.append( t )
1281 t.start()
1282
1283 for t in threads:
1284 t.join()
1285 ONOSIntents.append( t.result )
1286
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001287 for i in range( len( ONOSIntents ) ):
1288 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001290 main.log.error( "Error in getting ONOS" + node + " intents" )
1291 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001292 repr( ONOSIntents[ i ] ) )
1293 intentsResults = False
1294 utilities.assert_equals(
1295 expect=True,
1296 actual=intentsResults,
1297 onpass="No error in reading intents output",
1298 onfail="Error in reading intents from ONOS" )
1299
1300 main.step( "Check for consistency in Intents from each controller" )
1301 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1302 main.log.info( "Intents are consistent across all ONOS " +
1303 "nodes" )
1304 else:
1305 consistentIntents = False
1306 main.log.error( "Intents not consistent" )
1307 utilities.assert_equals(
1308 expect=True,
1309 actual=consistentIntents,
1310 onpass="Intents are consistent across all ONOS nodes",
1311 onfail="ONOS nodes have different views of intents" )
1312
1313 if intentsResults:
1314 # Try to make it easy to figure out what is happening
1315 #
1316 # Intent ONOS1 ONOS2 ...
1317 # 0x01 INSTALLED INSTALLING
1318 # ... ... ...
1319 # ... ... ...
1320 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001321 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001322 title += " " * 10 + "ONOS" + str( n + 1 )
1323 main.log.warn( title )
1324 # get all intent keys in the cluster
1325 keys = []
1326 for nodeStr in ONOSIntents:
1327 node = json.loads( nodeStr )
1328 for intent in node:
1329 keys.append( intent.get( 'id' ) )
1330 keys = set( keys )
1331 for key in keys:
1332 row = "%-13s" % key
1333 for nodeStr in ONOSIntents:
1334 node = json.loads( nodeStr )
1335 for intent in node:
1336 if intent.get( 'id', "Error" ) == key:
1337 row += "%-15s" % intent.get( 'state' )
1338 main.log.warn( row )
1339 # End table view
1340
1341 if intentsResults and not consistentIntents:
1342 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001343 n = str( main.activeNodes[-1] + 1 )
1344 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001345 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1346 sort_keys=True,
1347 indent=4,
1348 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001349 for i in range( len( ONOSIntents ) ):
1350 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001351 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001352 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001353 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1354 sort_keys=True,
1355 indent=4,
1356 separators=( ',', ': ' ) ) )
1357 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001358 main.log.debug( "ONOS" + node + " intents match ONOS" +
1359 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001360 elif intentsResults and consistentIntents:
1361 intentCheck = main.TRUE
1362 intentState = ONOSIntents[ 0 ]
1363
1364 main.step( "Get the flows from each controller" )
1365 global flowState
1366 flowState = []
1367 ONOSFlows = []
1368 ONOSFlowsJson = []
1369 flowCheck = main.FALSE
1370 consistentFlows = True
1371 flowsResults = True
1372 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001373 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001374 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001375 name="flows-" + str( i ),
1376 args=[],
1377 kwargs={ 'jsonFormat': True } )
1378 threads.append( t )
1379 t.start()
1380
1381 # NOTE: Flows command can take some time to run
1382                                         time.sleep( 30 )
1383 for t in threads:
1384 t.join()
1385 result = t.result
1386 ONOSFlows.append( result )
1387
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001388 for i in range( len( ONOSFlows ) ):
1389 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001390 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1391 main.log.error( "Error in getting ONOS" + num + " flows" )
1392 main.log.warn( "ONOS" + num + " flows response: " +
1393 repr( ONOSFlows[ i ] ) )
1394 flowsResults = False
1395 ONOSFlowsJson.append( None )
1396 else:
1397 try:
1398 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1399 except ( ValueError, TypeError ):
1400 # FIXME: change this to log.error?
1401 main.log.exception( "Error in parsing ONOS" + num +
1402 " response as json." )
1403 main.log.error( repr( ONOSFlows[ i ] ) )
1404 ONOSFlowsJson.append( None )
1405 flowsResults = False
1406 utilities.assert_equals(
1407 expect=True,
1408 actual=flowsResults,
1409 onpass="No error in reading flows output",
1410 onfail="Error in reading flows from ONOS" )
1411
1412 main.step( "Check for consistency in Flows from each controller" )
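                                             # NOTE: only the number of entries in each parsed flows response is
                                             #       compared here, not the flow contents; None entries from failed
                                             #       nodes simply count as inconsistent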
1413                                         tmp = [ f is not None and ONOSFlowsJson[ 0 ] is not None and
                                                     len( f ) == len( ONOSFlowsJson[ 0 ] ) for f in ONOSFlowsJson ]
1414 if all( tmp ):
1415 main.log.info( "Flow count is consistent across all ONOS nodes" )
1416 else:
1417 consistentFlows = False
1418 utilities.assert_equals(
1419 expect=True,
1420 actual=consistentFlows,
1421 onpass="The flow count is consistent across all ONOS nodes",
1422 onfail="ONOS nodes have different flow counts" )
1423
1424 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001425 for i in range( len( ONOSFlows ) ):
1426 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001427 try:
1428 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001429 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001430 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1431 indent=4, separators=( ',', ': ' ) ) )
1432 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001433 main.log.warn( "ONOS" + node + " flows: " +
1434 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001435 elif flowsResults and consistentFlows:
1436 flowCheck = main.TRUE
1437 flowState = ONOSFlows[ 0 ]
1438
1439 main.step( "Get the OF Table entries" )
1440 global flows
1441 flows = []
1442 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001443 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
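                                             # These per-switch snapshots are reused in CASE7 to verify that the
                                             # flow tables did not change across the node failure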
Jon Hall5cf14d52015-07-16 12:15:19 -07001444 if flowCheck == main.FALSE:
1445 for table in flows:
1446 main.log.warn( table )
1447 # TODO: Compare switch flow tables with ONOS flow tables
1448
1449 main.step( "Start continuous pings" )
1450 main.Mininet2.pingLong(
1451 src=main.params[ 'PING' ][ 'source1' ],
1452 target=main.params[ 'PING' ][ 'target1' ],
1453 pingTime=500 )
1454 main.Mininet2.pingLong(
1455 src=main.params[ 'PING' ][ 'source2' ],
1456 target=main.params[ 'PING' ][ 'target2' ],
1457 pingTime=500 )
1458 main.Mininet2.pingLong(
1459 src=main.params[ 'PING' ][ 'source3' ],
1460 target=main.params[ 'PING' ][ 'target3' ],
1461 pingTime=500 )
1462 main.Mininet2.pingLong(
1463 src=main.params[ 'PING' ][ 'source4' ],
1464 target=main.params[ 'PING' ][ 'target4' ],
1465 pingTime=500 )
1466 main.Mininet2.pingLong(
1467 src=main.params[ 'PING' ][ 'source5' ],
1468 target=main.params[ 'PING' ][ 'target5' ],
1469 pingTime=500 )
1470 main.Mininet2.pingLong(
1471 src=main.params[ 'PING' ][ 'source6' ],
1472 target=main.params[ 'PING' ][ 'target6' ],
1473 pingTime=500 )
1474 main.Mininet2.pingLong(
1475 src=main.params[ 'PING' ][ 'source7' ],
1476 target=main.params[ 'PING' ][ 'target7' ],
1477 pingTime=500 )
1478 main.Mininet2.pingLong(
1479 src=main.params[ 'PING' ][ 'source8' ],
1480 target=main.params[ 'PING' ][ 'target8' ],
1481 pingTime=500 )
1482 main.Mininet2.pingLong(
1483 src=main.params[ 'PING' ][ 'source9' ],
1484 target=main.params[ 'PING' ][ 'target9' ],
1485 pingTime=500 )
1486 main.Mininet2.pingLong(
1487 src=main.params[ 'PING' ][ 'source10' ],
1488 target=main.params[ 'PING' ][ 'target10' ],
1489 pingTime=500 )
1490
1491 main.step( "Collecting topology information from ONOS" )
1492 devices = []
1493 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001494 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001495 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001496 name="devices-" + str( i ),
1497 args=[ ] )
1498 threads.append( t )
1499 t.start()
1500
1501 for t in threads:
1502 t.join()
1503 devices.append( t.result )
1504 hosts = []
1505 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001506 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001507 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001508 name="hosts-" + str( i ),
1509 args=[ ] )
1510 threads.append( t )
1511 t.start()
1512
1513 for t in threads:
1514 t.join()
1515 try:
1516 hosts.append( json.loads( t.result ) )
1517 except ( ValueError, TypeError ):
1518 # FIXME: better handling of this, print which node
1519 # Maybe use thread name?
1520 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001521 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001522 hosts.append( None )
1523
1524 ports = []
1525 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001526 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001527 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001528 name="ports-" + str( i ),
1529 args=[ ] )
1530 threads.append( t )
1531 t.start()
1532
1533 for t in threads:
1534 t.join()
1535 ports.append( t.result )
1536 links = []
1537 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001538 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001539 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001540 name="links-" + str( i ),
1541 args=[ ] )
1542 threads.append( t )
1543 t.start()
1544
1545 for t in threads:
1546 t.join()
1547 links.append( t.result )
1548 clusters = []
1549 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001550 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001551 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001552 name="clusters-" + str( i ),
1553 args=[ ] )
1554 threads.append( t )
1555 t.start()
1556
1557 for t in threads:
1558 t.join()
1559 clusters.append( t.result )
1560 # Compare json objects for hosts and dataplane clusters
1561
1562 # hosts
1563 main.step( "Host view is consistent across ONOS nodes" )
1564 consistentHostsResult = main.TRUE
1565 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001566 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001567 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001568 if hosts[ controller ] == hosts[ 0 ]:
1569 continue
1570 else: # hosts not consistent
1571 main.log.error( "hosts from ONOS" +
1572 controllerStr +
1573                                          " are inconsistent with ONOS1" )
1574 main.log.warn( repr( hosts[ controller ] ) )
1575 consistentHostsResult = main.FALSE
1576
1577 else:
1578 main.log.error( "Error in getting ONOS hosts from ONOS" +
1579 controllerStr )
1580 consistentHostsResult = main.FALSE
1581 main.log.warn( "ONOS" + controllerStr +
1582 " hosts response: " +
1583 repr( hosts[ controller ] ) )
1584 utilities.assert_equals(
1585 expect=main.TRUE,
1586 actual=consistentHostsResult,
1587 onpass="Hosts view is consistent across all ONOS nodes",
1588 onfail="ONOS nodes have different views of hosts" )
1589
1590 main.step( "Each host has an IP address" )
1591 ipResult = main.TRUE
1592 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001593 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001594 if hosts[ controller ]:
1595 for host in hosts[ controller ]:
1596 if not host.get( 'ipAddresses', [ ] ):
1597 main.log.error( "Error with host ips on controller" +
1598 controllerStr + ": " + str( host ) )
1599 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001600 utilities.assert_equals(
1601 expect=main.TRUE,
1602 actual=ipResult,
1603                                             onpass="The IPs of the hosts aren't empty",
1604                                             onfail="The IP of at least one host is missing" )
1605
1606 # Strongly connected clusters of devices
1607 main.step( "Cluster view is consistent across ONOS nodes" )
1608 consistentClustersResult = main.TRUE
1609 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001610 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001611 if "Error" not in clusters[ controller ]:
1612 if clusters[ controller ] == clusters[ 0 ]:
1613 continue
1614 else: # clusters not consistent
1615 main.log.error( "clusters from ONOS" + controllerStr +
1616                                          " are inconsistent with ONOS1" )
1617 consistentClustersResult = main.FALSE
1618
1619 else:
1620 main.log.error( "Error in getting dataplane clusters " +
1621 "from ONOS" + controllerStr )
1622 consistentClustersResult = main.FALSE
1623 main.log.warn( "ONOS" + controllerStr +
1624 " clusters response: " +
1625 repr( clusters[ controller ] ) )
1626 utilities.assert_equals(
1627 expect=main.TRUE,
1628 actual=consistentClustersResult,
1629 onpass="Clusters view is consistent across all ONOS nodes",
1630 onfail="ONOS nodes have different views of clusters" )
1631 # there should always only be one cluster
1632 main.step( "Cluster view correct across ONOS nodes" )
1633 try:
1634 numClusters = len( json.loads( clusters[ 0 ] ) )
1635 except ( ValueError, TypeError ):
1636 main.log.exception( "Error parsing clusters[0]: " +
1637 repr( clusters[ 0 ] ) )
1638                                             clusterResults = main.FALSE
                                                 numClusters = None  # avoid a NameError in the check below if parsing failed
1639 if numClusters == 1:
1640 clusterResults = main.TRUE
1641 utilities.assert_equals(
1642 expect=1,
1643 actual=numClusters,
1644 onpass="ONOS shows 1 SCC",
1645 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1646
1647 main.step( "Comparing ONOS topology to MN" )
1648 devicesResults = main.TRUE
1649 linksResults = main.TRUE
1650 hostsResults = main.TRUE
1651 mnSwitches = main.Mininet1.getSwitches()
1652 mnLinks = main.Mininet1.getLinks()
1653 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001654         for controller in range( len( main.activeNodes ) ):
1655 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001656 if devices[ controller ] and ports[ controller ] and\
1657 "Error" not in devices[ controller ] and\
1658 "Error" not in ports[ controller ]:
1659
1660 currentDevicesResult = main.Mininet1.compareSwitches(
1661 mnSwitches,
1662 json.loads( devices[ controller ] ),
1663 json.loads( ports[ controller ] ) )
1664 else:
1665 currentDevicesResult = main.FALSE
1666 utilities.assert_equals( expect=main.TRUE,
1667 actual=currentDevicesResult,
1668 onpass="ONOS" + controllerStr +
1669 " Switches view is correct",
1670 onfail="ONOS" + controllerStr +
1671 " Switches view is incorrect" )
1672 if links[ controller ] and "Error" not in links[ controller ]:
1673 currentLinksResult = main.Mininet1.compareLinks(
1674 mnSwitches, mnLinks,
1675 json.loads( links[ controller ] ) )
1676 else:
1677 currentLinksResult = main.FALSE
1678 utilities.assert_equals( expect=main.TRUE,
1679 actual=currentLinksResult,
1680 onpass="ONOS" + controllerStr +
1681 " links view is correct",
1682 onfail="ONOS" + controllerStr +
1683 " links view is incorrect" )
1684
Jon Hall657cdf62015-12-17 14:40:51 -08001685 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001686 currentHostsResult = main.Mininet1.compareHosts(
1687 mnHosts,
1688 hosts[ controller ] )
1689 else:
1690 currentHostsResult = main.FALSE
1691 utilities.assert_equals( expect=main.TRUE,
1692 actual=currentHostsResult,
1693 onpass="ONOS" + controllerStr +
1694 " hosts exist in Mininet",
1695 onfail="ONOS" + controllerStr +
1696 " hosts don't match Mininet" )
1697
1698 devicesResults = devicesResults and currentDevicesResult
1699 linksResults = linksResults and currentLinksResult
1700 hostsResults = hostsResults and currentHostsResult
1701
1702 main.step( "Device information is correct" )
1703 utilities.assert_equals(
1704 expect=main.TRUE,
1705 actual=devicesResults,
1706 onpass="Device information is correct",
1707 onfail="Device information is incorrect" )
1708
1709 main.step( "Links are correct" )
1710 utilities.assert_equals(
1711 expect=main.TRUE,
1712 actual=linksResults,
1713                                             onpass="Links are correct",
1714 onfail="Links are incorrect" )
1715
1716 main.step( "Hosts are correct" )
1717 utilities.assert_equals(
1718 expect=main.TRUE,
1719 actual=hostsResults,
1720 onpass="Hosts are correct",
1721 onfail="Hosts are incorrect" )
1722
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001723 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001724 """
1725         The failure case: kill a minority of the ONOS nodes.
1726 """
Jon Halle1a3b752015-07-22 13:02:46 -07001727 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001728 assert main, "main not defined"
1729 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001730 assert main.CLIs, "main.CLIs not defined"
1731 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001732 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001733
1734 main.step( "Checking ONOS Logs for errors" )
1735 for node in main.nodes:
1736 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1737 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1738
Jon Hall3b489db2015-10-05 14:38:37 -07001739 n = len( main.nodes ) # Number of nodes
1740 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1741 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1742 if n > 3:
1743 main.kill.append( p - 1 )
1744 # NOTE: This only works for cluster sizes of 3,5, or 7.
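                                             # Sketch of the selection arithmetic above (integer division):
                                             #   n=3 -> p=3, kill=[0]; n=5 -> p=4, kill=[0,3]; n=7 -> p=5, kill=[0,4]
                                             # so a strict minority of the cluster is killed in each case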
1745
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001746 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001747 killResults = main.TRUE
1748 for i in main.kill:
1749 killResults = killResults and\
1750 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001751 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001752 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001753 onpass="ONOS nodes killed successfully",
1754 onfail="ONOS nodes NOT successfully killed" )
1755
1756 def CASE62( self, main ):
1757 """
1758         The recovery case: bring the stopped ONOS nodes back up.
1759 """
1760 import time
1761 assert main.numCtrls, "main.numCtrls not defined"
1762 assert main, "main not defined"
1763 assert utilities.assert_equals, "utilities.assert_equals not defined"
1764 assert main.CLIs, "main.CLIs not defined"
1765 assert main.nodes, "main.nodes not defined"
1766 assert main.kill, "main.kill not defined"
1767 main.case( "Restart minority of ONOS nodes" )
1768
1769 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1770 startResults = main.TRUE
1771 restartTime = time.time()
1772 for i in main.kill:
1773 startResults = startResults and\
1774 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1775 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1776 onpass="ONOS nodes started successfully",
1777 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001778
1779 main.step( "Checking if ONOS is up yet" )
1780 count = 0
1781 onosIsupResult = main.FALSE
1782 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001783 onosIsupResult = main.TRUE
1784 for i in main.kill:
1785 onosIsupResult = onosIsupResult and\
1786 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001787 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001788 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1789 onpass="ONOS restarted successfully",
1790 onfail="ONOS restart NOT successful" )
1791
Jon Halle1a3b752015-07-22 13:02:46 -07001792 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001793 cliResults = main.TRUE
1794 for i in main.kill:
1795 cliResults = cliResults and\
1796 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001797 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001798 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1799 onpass="ONOS cli restarted",
1800 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001801 main.activeNodes.sort()
1802 try:
1803 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1804 "List of active nodes has duplicates, this likely indicates something was run out of order"
1805 except AssertionError:
1806 main.log.exception( "" )
1807 main.cleanup()
1808 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001809
1810         # Grab the time of restart so we can check how long the gossip
1811 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001812 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001813 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001814         # TODO: Make this configurable. Also, we are breaking the above timer
1815 time.sleep( 60 )
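                                             # Dump cluster membership, leaders, and partition state from one of the
                                             # still-active nodes to help with debugging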
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001816 node = main.activeNodes[0]
1817 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1818 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1819 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001820
1821 def CASE7( self, main ):
1822 """
1823 Check state after ONOS failure
1824 """
1825 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001826 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001827 assert main, "main not defined"
1828 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001829 assert main.CLIs, "main.CLIs not defined"
1830 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001831 try:
1832 main.kill
1833 except AttributeError:
1834 main.kill = []
1835
Jon Hall5cf14d52015-07-16 12:15:19 -07001836 main.case( "Running ONOS Constant State Tests" )
1837
1838 main.step( "Check that each switch has a master" )
1839 # Assert that each device has a master
1840 rolesNotNull = main.TRUE
1841 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001842 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001843 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001844 name="rolesNotNull-" + str( i ),
1845 args=[ ] )
1846 threads.append( t )
1847 t.start()
1848
1849 for t in threads:
1850 t.join()
1851 rolesNotNull = rolesNotNull and t.result
1852 utilities.assert_equals(
1853 expect=main.TRUE,
1854 actual=rolesNotNull,
1855 onpass="Each device has a master",
1856 onfail="Some devices don't have a master assigned" )
1857
1858 main.step( "Read device roles from ONOS" )
1859 ONOSMastership = []
1860 consistentMastership = True
1861 rolesResults = True
1862 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001863 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001864 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001865 name="roles-" + str( i ),
1866 args=[] )
1867 threads.append( t )
1868 t.start()
1869
1870 for t in threads:
1871 t.join()
1872 ONOSMastership.append( t.result )
1873
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001874 for i in range( len( ONOSMastership ) ):
1875 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001876 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001877 main.log.error( "Error in getting ONOS" + node + " roles" )
1878 main.log.warn( "ONOS" + node + " mastership response: " +
1879 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001880 rolesResults = False
1881 utilities.assert_equals(
1882 expect=True,
1883 actual=rolesResults,
1884 onpass="No error in reading roles output",
1885 onfail="Error in reading roles from ONOS" )
1886
1887 main.step( "Check for consistency in roles from each controller" )
1888 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1889 main.log.info(
1890 "Switch roles are consistent across all ONOS nodes" )
1891 else:
1892 consistentMastership = False
1893 utilities.assert_equals(
1894 expect=True,
1895 actual=consistentMastership,
1896 onpass="Switch roles are consistent across all ONOS nodes",
1897 onfail="ONOS nodes have different views of switch roles" )
1898
1899 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001900 for i in range( len( ONOSMastership ) ):
1901 node = str( main.activeNodes[i] + 1 )
1902 main.log.warn( "ONOS" + node + " roles: ",
1903 json.dumps( json.loads( ONOSMastership[ i ] ),
1904 sort_keys=True,
1905 indent=4,
1906 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001907
1908 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07001909
1910 main.step( "Get the intents and compare across all nodes" )
1911 ONOSIntents = []
1912 intentCheck = main.FALSE
1913 consistentIntents = True
1914 intentsResults = True
1915 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001916 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001917 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001918 name="intents-" + str( i ),
1919 args=[],
1920 kwargs={ 'jsonFormat': True } )
1921 threads.append( t )
1922 t.start()
1923
1924 for t in threads:
1925 t.join()
1926 ONOSIntents.append( t.result )
1927
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001928         for i in range( len( ONOSIntents ) ):
1929 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001930 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001931 main.log.error( "Error in getting ONOS" + node + " intents" )
1932 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001933 repr( ONOSIntents[ i ] ) )
1934 intentsResults = False
1935 utilities.assert_equals(
1936 expect=True,
1937 actual=intentsResults,
1938 onpass="No error in reading intents output",
1939 onfail="Error in reading intents from ONOS" )
1940
1941 main.step( "Check for consistency in Intents from each controller" )
1942 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1943 main.log.info( "Intents are consistent across all ONOS " +
1944 "nodes" )
1945 else:
1946 consistentIntents = False
1947
1948 # Try to make it easy to figure out what is happening
1949 #
1950 # Intent ONOS1 ONOS2 ...
1951 # 0x01 INSTALLED INSTALLING
1952 # ... ... ...
1953 # ... ... ...
1954 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001955 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001956 title += " " * 10 + "ONOS" + str( n + 1 )
1957 main.log.warn( title )
1958 # get all intent keys in the cluster
1959 keys = []
1960 for nodeStr in ONOSIntents:
1961 node = json.loads( nodeStr )
1962 for intent in node:
1963 keys.append( intent.get( 'id' ) )
1964 keys = set( keys )
1965 for key in keys:
1966 row = "%-13s" % key
1967 for nodeStr in ONOSIntents:
1968 node = json.loads( nodeStr )
1969 for intent in node:
1970 if intent.get( 'id' ) == key:
1971 row += "%-15s" % intent.get( 'state' )
1972 main.log.warn( row )
1973 # End table view
1974
1975 utilities.assert_equals(
1976 expect=True,
1977 actual=consistentIntents,
1978 onpass="Intents are consistent across all ONOS nodes",
1979 onfail="ONOS nodes have different views of intents" )
1980 intentStates = []
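                                             # Build a per-node histogram of intent states and log it,
                                             # e.g. {'INSTALLED': 25} (example value; actual counts will vary)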
1981 for node in ONOSIntents: # Iter through ONOS nodes
1982 nodeStates = []
1983 # Iter through intents of a node
1984 try:
1985 for intent in json.loads( node ):
1986 nodeStates.append( intent[ 'state' ] )
1987 except ( ValueError, TypeError ):
1988 main.log.exception( "Error in parsing intents" )
1989 main.log.error( repr( node ) )
1990 intentStates.append( nodeStates )
1991 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1992 main.log.info( dict( out ) )
1993
1994 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001995 for i in range( len( main.activeNodes ) ):
1996 node = str( main.activeNodes[i] + 1 )
1997 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001998 main.log.warn( json.dumps(
1999 json.loads( ONOSIntents[ i ] ),
2000 sort_keys=True,
2001 indent=4,
2002 separators=( ',', ': ' ) ) )
2003 elif intentsResults and consistentIntents:
2004 intentCheck = main.TRUE
2005
2006 # NOTE: Store has no durability, so intents are lost across system
2007 # restarts
2008 main.step( "Compare current intents with intents before the failure" )
2009 # NOTE: this requires case 5 to pass for intentState to be set.
2010 # maybe we should stop the test if that fails?
2011 sameIntents = main.FALSE
2012 if intentState and intentState == ONOSIntents[ 0 ]:
2013 sameIntents = main.TRUE
2014 main.log.info( "Intents are consistent with before failure" )
2015 # TODO: possibly the states have changed? we may need to figure out
2016 # what the acceptable states are
2017 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2018 sameIntents = main.TRUE
2019 try:
2020 before = json.loads( intentState )
2021 after = json.loads( ONOSIntents[ 0 ] )
2022 for intent in before:
2023 if intent not in after:
2024 sameIntents = main.FALSE
2025 main.log.debug( "Intent is not currently in ONOS " +
2026 "(at least in the same form):" )
2027 main.log.debug( json.dumps( intent ) )
2028 except ( ValueError, TypeError ):
2029 main.log.exception( "Exception printing intents" )
2030 main.log.debug( repr( ONOSIntents[0] ) )
2031 main.log.debug( repr( intentState ) )
2032 if sameIntents == main.FALSE:
2033 try:
2034 main.log.debug( "ONOS intents before: " )
2035 main.log.debug( json.dumps( json.loads( intentState ),
2036 sort_keys=True, indent=4,
2037 separators=( ',', ': ' ) ) )
2038 main.log.debug( "Current ONOS intents: " )
2039 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2040 sort_keys=True, indent=4,
2041 separators=( ',', ': ' ) ) )
2042 except ( ValueError, TypeError ):
2043 main.log.exception( "Exception printing intents" )
2044 main.log.debug( repr( ONOSIntents[0] ) )
2045 main.log.debug( repr( intentState ) )
2046 utilities.assert_equals(
2047 expect=main.TRUE,
2048 actual=sameIntents,
2049 onpass="Intents are consistent with before failure",
2050 onfail="The Intents changed during failure" )
2051 intentCheck = intentCheck and sameIntents
2052
2053 main.step( "Get the OF Table entries and compare to before " +
2054 "component failure" )
2055 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002056 for i in range( 28 ):
2057 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002058 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2059 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
Jon Hall5cf14d52015-07-16 12:15:19 -07002060 if FlowTables == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002061 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2062
Jon Hall5cf14d52015-07-16 12:15:19 -07002063 utilities.assert_equals(
2064 expect=main.TRUE,
2065 actual=FlowTables,
2066 onpass="No changes were found in the flow tables",
2067 onfail="Changes were found in the flow tables" )
2068
2069 main.Mininet2.pingLongKill()
2070 '''
2071 main.step( "Check the continuous pings to ensure that no packets " +
2072 "were dropped during component failure" )
2073 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2074 main.params[ 'TESTONIP' ] )
2075 LossInPings = main.FALSE
2076 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2077 for i in range( 8, 18 ):
2078 main.log.info(
2079 "Checking for a loss in pings along flow from s" +
2080 str( i ) )
2081 LossInPings = main.Mininet2.checkForLoss(
2082 "/tmp/ping.h" +
2083 str( i ) ) or LossInPings
2084 if LossInPings == main.TRUE:
2085 main.log.info( "Loss in ping detected" )
2086 elif LossInPings == main.ERROR:
2087                 main.log.info( "There are multiple Mininet processes running" )
2088 elif LossInPings == main.FALSE:
2089 main.log.info( "No Loss in the pings" )
2090 main.log.info( "No loss of dataplane connectivity" )
2091 utilities.assert_equals(
2092 expect=main.FALSE,
2093 actual=LossInPings,
2094 onpass="No Loss of connectivity",
2095 onfail="Loss of dataplane connectivity detected" )
2096 '''
2097
2098 main.step( "Leadership Election is still functional" )
2099 # Test of LeadershipElection
2100 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002101
Jon Hall3b489db2015-10-05 14:38:37 -07002102 restarted = []
2103 for i in main.kill:
2104 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002105 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002106
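                                             # A node fails this check if it returns an error, reports no leader, or
                                             # reports a leader that was one of the killed nodes; all remaining nodes
                                             # must also agree on a single leader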
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002107 for i in main.activeNodes:
2108 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002109 leaderN = cli.electionTestLeader()
2110 leaderList.append( leaderN )
2111 if leaderN == main.FALSE:
2112 # error in response
2113 main.log.error( "Something is wrong with " +
2114 "electionTestLeader function, check the" +
2115 " error logs" )
2116 leaderResult = main.FALSE
2117 elif leaderN is None:
2118 main.log.error( cli.name +
2119 " shows no leader for the election-app was" +
2120 " elected after the old one died" )
2121 leaderResult = main.FALSE
2122 elif leaderN in restarted:
2123 main.log.error( cli.name + " shows " + str( leaderN ) +
2124 " as leader for the election-app, but it " +
2125 "was restarted" )
2126 leaderResult = main.FALSE
2127 if len( set( leaderList ) ) != 1:
2128 leaderResult = main.FALSE
2129 main.log.error(
2130 "Inconsistent view of leader for the election test app" )
2131 # TODO: print the list
2132 utilities.assert_equals(
2133 expect=main.TRUE,
2134 actual=leaderResult,
2135 onpass="Leadership election passed",
2136 onfail="Something went wrong with Leadership election" )
2137
2138 def CASE8( self, main ):
2139 """
2140 Compare topo
2141 """
2142 import json
2143 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002144 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 assert main, "main not defined"
2146 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002147 assert main.CLIs, "main.CLIs not defined"
2148 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002149
2150 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002151 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002153 topoResult = main.FALSE
2154 elapsed = 0
2155 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002156 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002157 startTime = time.time()
2158 # Give time for Gossip to work
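                                             # Keep re-polling until the topologies match, or until both 60 seconds
                                             # have elapsed and at least 3 comparison attempts have been made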
Jon Halle9b1fa32015-12-08 15:32:21 -08002159 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002160 devicesResults = main.TRUE
2161 linksResults = main.TRUE
2162 hostsResults = main.TRUE
2163 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002164 count += 1
2165 cliStart = time.time()
2166 devices = []
2167 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002168 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002169 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002170 name="devices-" + str( i ),
2171 args=[ ] )
2172 threads.append( t )
2173 t.start()
2174
2175 for t in threads:
2176 t.join()
2177 devices.append( t.result )
2178 hosts = []
2179 ipResult = main.TRUE
2180 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002181 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002182 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002183 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002184 args=[ main.CLIs[i].hosts, [ None ] ],
2185 kwargs= { 'sleep': 5, 'attempts': 5,
2186 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002187 threads.append( t )
2188 t.start()
2189
2190 for t in threads:
2191 t.join()
2192 try:
2193 hosts.append( json.loads( t.result ) )
2194 except ( ValueError, TypeError ):
2195 main.log.exception( "Error parsing hosts results" )
2196 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002197 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002198 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002199 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002200 if hosts[ controller ]:
2201 for host in hosts[ controller ]:
2202 if host is None or host.get( 'ipAddresses', [] ) == []:
2203 main.log.error(
2204 "Error with host ipAddresses on controller" +
2205 controllerStr + ": " + str( host ) )
2206 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002207 ports = []
2208 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002209 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002210 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 name="ports-" + str( i ),
2212 args=[ ] )
2213 threads.append( t )
2214 t.start()
2215
2216 for t in threads:
2217 t.join()
2218 ports.append( t.result )
2219 links = []
2220 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002221 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002222 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002223 name="links-" + str( i ),
2224 args=[ ] )
2225 threads.append( t )
2226 t.start()
2227
2228 for t in threads:
2229 t.join()
2230 links.append( t.result )
2231 clusters = []
2232 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002233 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002234 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002235 name="clusters-" + str( i ),
2236 args=[ ] )
2237 threads.append( t )
2238 t.start()
2239
2240 for t in threads:
2241 t.join()
2242 clusters.append( t.result )
2243
2244 elapsed = time.time() - startTime
2245 cliTime = time.time() - cliStart
2246 print "Elapsed time: " + str( elapsed )
2247 print "CLI time: " + str( cliTime )
2248
2249 mnSwitches = main.Mininet1.getSwitches()
2250 mnLinks = main.Mininet1.getLinks()
2251 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002252 for controller in range( len( main.activeNodes ) ):
2253 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002254 if devices[ controller ] and ports[ controller ] and\
2255 "Error" not in devices[ controller ] and\
2256 "Error" not in ports[ controller ]:
2257
2258 currentDevicesResult = main.Mininet1.compareSwitches(
2259 mnSwitches,
2260 json.loads( devices[ controller ] ),
2261 json.loads( ports[ controller ] ) )
2262 else:
2263 currentDevicesResult = main.FALSE
2264 utilities.assert_equals( expect=main.TRUE,
2265 actual=currentDevicesResult,
2266 onpass="ONOS" + controllerStr +
2267 " Switches view is correct",
2268 onfail="ONOS" + controllerStr +
2269 " Switches view is incorrect" )
2270
2271 if links[ controller ] and "Error" not in links[ controller ]:
2272 currentLinksResult = main.Mininet1.compareLinks(
2273 mnSwitches, mnLinks,
2274 json.loads( links[ controller ] ) )
2275 else:
2276 currentLinksResult = main.FALSE
2277 utilities.assert_equals( expect=main.TRUE,
2278 actual=currentLinksResult,
2279 onpass="ONOS" + controllerStr +
2280 " links view is correct",
2281 onfail="ONOS" + controllerStr +
2282 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002283 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002284 currentHostsResult = main.Mininet1.compareHosts(
2285 mnHosts,
2286 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002287 elif hosts[ controller ] == []:
2288 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002289 else:
2290 currentHostsResult = main.FALSE
2291 utilities.assert_equals( expect=main.TRUE,
2292 actual=currentHostsResult,
2293 onpass="ONOS" + controllerStr +
2294 " hosts exist in Mininet",
2295 onfail="ONOS" + controllerStr +
2296 " hosts don't match Mininet" )
2297 # CHECKING HOST ATTACHMENT POINTS
2298 hostAttachment = True
2299 zeroHosts = False
2300 # FIXME: topo-HA/obelisk specific mappings:
2301 # key is mac and value is dpid
2302 mappings = {}
2303 for i in range( 1, 29 ): # hosts 1 through 28
2304 # set up correct variables:
2305 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2306 if i == 1:
2307 deviceId = "1000".zfill(16)
2308 elif i == 2:
2309 deviceId = "2000".zfill(16)
2310 elif i == 3:
2311 deviceId = "3000".zfill(16)
2312 elif i == 4:
2313 deviceId = "3004".zfill(16)
2314 elif i == 5:
2315 deviceId = "5000".zfill(16)
2316 elif i == 6:
2317 deviceId = "6000".zfill(16)
2318 elif i == 7:
2319 deviceId = "6007".zfill(16)
2320 elif i >= 8 and i <= 17:
2321 dpid = '3' + str( i ).zfill( 3 )
2322 deviceId = dpid.zfill(16)
2323 elif i >= 18 and i <= 27:
2324 dpid = '6' + str( i ).zfill( 3 )
2325 deviceId = dpid.zfill(16)
2326 elif i == 28:
2327 deviceId = "2800".zfill(16)
2328 mappings[ macId ] = deviceId
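                                                 # For example, host 10 maps to mac 00:00:00:00:00:0A on device
                                                 # 0000000000003010 and host 28 maps to 00:00:00:00:00:1C on
                                                 # 0000000000002800 (derived from the mapping rules above)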
Jon Halld8f6de82015-12-17 17:04:34 -08002329 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002330 if hosts[ controller ] == []:
2331 main.log.warn( "There are no hosts discovered" )
2332 zeroHosts = True
2333 else:
2334 for host in hosts[ controller ]:
2335 mac = None
2336 location = None
2337 device = None
2338 port = None
2339 try:
2340 mac = host.get( 'mac' )
2341 assert mac, "mac field could not be found for this host object"
2342
2343 location = host.get( 'location' )
2344 assert location, "location field could not be found for this host object"
2345
2346 # Trim the protocol identifier off deviceId
2347 device = str( location.get( 'elementId' ) ).split(':')[1]
2348 assert device, "elementId field could not be found for this host location object"
2349
2350 port = location.get( 'port' )
2351 assert port, "port field could not be found for this host location object"
2352
2353 # Now check if this matches where they should be
2354 if mac and device and port:
2355 if str( port ) != "1":
2356 main.log.error( "The attachment port is incorrect for " +
2357 "host " + str( mac ) +
2358 ". Expected: 1 Actual: " + str( port) )
2359 hostAttachment = False
2360 if device != mappings[ str( mac ) ]:
2361 main.log.error( "The attachment device is incorrect for " +
2362 "host " + str( mac ) +
2363 ". Expected: " + mappings[ str( mac ) ] +
2364 " Actual: " + device )
2365 hostAttachment = False
2366 else:
2367 hostAttachment = False
2368 except AssertionError:
2369 main.log.exception( "Json object not as expected" )
2370 main.log.error( repr( host ) )
2371 hostAttachment = False
2372 else:
2373 main.log.error( "No hosts json output or \"Error\"" +
2374 " in output. hosts = " +
2375 repr( hosts[ controller ] ) )
2376 if zeroHosts is False:
2377 hostAttachment = True
2378
2379 # END CHECKING HOST ATTACHMENT POINTS
2380 devicesResults = devicesResults and currentDevicesResult
2381 linksResults = linksResults and currentLinksResult
2382 hostsResults = hostsResults and currentHostsResult
2383 hostAttachmentResults = hostAttachmentResults and\
2384 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002385 topoResult = devicesResults and linksResults and\
2386 hostsResults and hostAttachmentResults
2387 utilities.assert_equals( expect=True,
2388 actual=topoResult,
2389 onpass="ONOS topology matches Mininet",
2390                                          onfail="ONOS topology doesn't match Mininet" )
2391 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002392
2393 # Compare json objects for hosts and dataplane clusters
2394
2395 # hosts
2396 main.step( "Hosts view is consistent across all ONOS nodes" )
2397 consistentHostsResult = main.TRUE
2398 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002399 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002400 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002401 if hosts[ controller ] == hosts[ 0 ]:
2402 continue
2403 else: # hosts not consistent
2404 main.log.error( "hosts from ONOS" + controllerStr +
2405                                " are inconsistent with ONOS1" )
2406 main.log.warn( repr( hosts[ controller ] ) )
2407 consistentHostsResult = main.FALSE
2408
2409 else:
2410 main.log.error( "Error in getting ONOS hosts from ONOS" +
2411 controllerStr )
2412 consistentHostsResult = main.FALSE
2413 main.log.warn( "ONOS" + controllerStr +
2414 " hosts response: " +
2415 repr( hosts[ controller ] ) )
2416 utilities.assert_equals(
2417 expect=main.TRUE,
2418 actual=consistentHostsResult,
2419 onpass="Hosts view is consistent across all ONOS nodes",
2420 onfail="ONOS nodes have different views of hosts" )
2421
2422 main.step( "Hosts information is correct" )
2423 hostsResults = hostsResults and ipResult
2424 utilities.assert_equals(
2425 expect=main.TRUE,
2426 actual=hostsResults,
2427 onpass="Host information is correct",
2428 onfail="Host information is incorrect" )
2429
2430 main.step( "Host attachment points to the network" )
2431 utilities.assert_equals(
2432 expect=True,
2433 actual=hostAttachmentResults,
2434 onpass="Hosts are correctly attached to the network",
2435 onfail="ONOS did not correctly attach hosts to the network" )
2436
2437 # Strongly connected clusters of devices
2438 main.step( "Clusters view is consistent across all ONOS nodes" )
2439 consistentClustersResult = main.TRUE
2440 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002441 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002442 if "Error" not in clusters[ controller ]:
2443 if clusters[ controller ] == clusters[ 0 ]:
2444 continue
2445 else: # clusters not consistent
2446 main.log.error( "clusters from ONOS" +
2447 controllerStr +
2448                                " are inconsistent with ONOS1" )
2449 consistentClustersResult = main.FALSE
2450
2451 else:
2452 main.log.error( "Error in getting dataplane clusters " +
2453 "from ONOS" + controllerStr )
2454 consistentClustersResult = main.FALSE
2455 main.log.warn( "ONOS" + controllerStr +
2456 " clusters response: " +
2457 repr( clusters[ controller ] ) )
2458 utilities.assert_equals(
2459 expect=main.TRUE,
2460 actual=consistentClustersResult,
2461 onpass="Clusters view is consistent across all ONOS nodes",
2462 onfail="ONOS nodes have different views of clusters" )
2463
2464 main.step( "There is only one SCC" )
2465 # there should always only be one cluster
2466 try:
2467 numClusters = len( json.loads( clusters[ 0 ] ) )
2468 except ( ValueError, TypeError ):
2469 main.log.exception( "Error parsing clusters[0]: " +
2470 repr( clusters[0] ) )
2471             clusterResults = main.FALSE
                 numClusters = None  # avoid a NameError in the check below if parsing failed
2472 if numClusters == 1:
2473 clusterResults = main.TRUE
2474 utilities.assert_equals(
2475 expect=1,
2476 actual=numClusters,
2477 onpass="ONOS shows 1 SCC",
2478 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2479
2480 topoResult = ( devicesResults and linksResults
2481 and hostsResults and consistentHostsResult
2482 and consistentClustersResult and clusterResults
2483 and ipResult and hostAttachmentResults )
2484
2485 topoResult = topoResult and int( count <= 2 )
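                                             # The overall result also requires that the topology converged within
                                             # two comparison attempts of the retry loop above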
2486 note = "note it takes about " + str( int( cliTime ) ) + \
2487 " seconds for the test to make all the cli calls to fetch " +\
2488 "the topology from each ONOS instance"
2489 main.log.info(
2490 "Very crass estimate for topology discovery/convergence( " +
2491 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2492 str( count ) + " tries" )
2493
2494 main.step( "Device information is correct" )
2495 utilities.assert_equals(
2496 expect=main.TRUE,
2497 actual=devicesResults,
2498 onpass="Device information is correct",
2499 onfail="Device information is incorrect" )
2500
2501 main.step( "Links are correct" )
2502 utilities.assert_equals(
2503 expect=main.TRUE,
2504 actual=linksResults,
2505             onpass="Links are correct",
2506 onfail="Links are incorrect" )
2507
2508 # FIXME: move this to an ONOS state case
2509 main.step( "Checking ONOS nodes" )
2510 nodesOutput = []
2511 nodeResults = main.TRUE
2512 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002513 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002514 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002515 name="nodes-" + str( i ),
2516 args=[ ] )
2517 threads.append( t )
2518 t.start()
2519
2520 for t in threads:
2521 t.join()
2522 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002523 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002524 ips.sort()
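                                             # Each node's view must list exactly the IPs of the active ONOS
                                             # members, all in state ACTIVE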
Jon Hall5cf14d52015-07-16 12:15:19 -07002525 for i in nodesOutput:
2526 try:
2527 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002528 activeIps = []
2529 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002530 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002531 if node['state'] == 'ACTIVE':
2532 activeIps.append( node['ip'] )
2533 activeIps.sort()
2534 if ips == activeIps:
2535 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002536 except ( ValueError, TypeError ):
2537 main.log.error( "Error parsing nodes output" )
2538 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002539 currentResult = main.FALSE
2540 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002541 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2542 onpass="Nodes check successful",
2543 onfail="Nodes check NOT successful" )
2544
2545 def CASE9( self, main ):
2546 """
2547 Link s3-s28 down
2548 """
2549 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002550 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002551 assert main, "main not defined"
2552 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002553 assert main.CLIs, "main.CLIs not defined"
2554 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002555 # NOTE: You should probably run a topology check after this
2556
2557 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2558
2559 description = "Turn off a link to ensure that Link Discovery " +\
2560 "is working properly"
2561 main.case( description )
2562
2563 main.step( "Kill Link between s3 and s28" )
2564 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2565 main.log.info( "Waiting " + str( linkSleep ) +
2566 " seconds for link down to be discovered" )
2567 time.sleep( linkSleep )
2568 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2569 onpass="Link down successful",
2570 onfail="Failed to bring link down" )
2571 # TODO do some sort of check here
2572
2573 def CASE10( self, main ):
2574 """
2575 Link s3-s28 up
2576 """
2577 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002578 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002579 assert main, "main not defined"
2580 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002581 assert main.CLIs, "main.CLIs not defined"
2582 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002583 # NOTE: You should probably run a topology check after this
2584
2585 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2586
2587 description = "Restore a link to ensure that Link Discovery is " + \
2588 "working properly"
2589 main.case( description )
2590
2591 main.step( "Bring link between s3 and s28 back up" )
2592 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2593 main.log.info( "Waiting " + str( linkSleep ) +
2594 " seconds for link up to be discovered" )
2595 time.sleep( linkSleep )
2596 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2597 onpass="Link up successful",
2598 onfail="Failed to bring link up" )
2599 # TODO do some sort of check here
2600
2601 def CASE11( self, main ):
2602 """
2603 Switch Down
2604 """
2605 # NOTE: You should probably run a topology check after this
2606 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002607 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002608 assert main, "main not defined"
2609 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002610 assert main.CLIs, "main.CLIs not defined"
2611 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002612
2613 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2614
2615 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002616 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002617 main.case( description )
2618 switch = main.params[ 'kill' ][ 'switch' ]
2619 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2620
2621 # TODO: Make this switch parameterizable
2622 main.step( "Kill " + switch )
2623 main.log.info( "Deleting " + switch )
2624 main.Mininet1.delSwitch( switch )
2625 main.log.info( "Waiting " + str( switchSleep ) +
2626 " seconds for switch down to be discovered" )
2627 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002628 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002629 # Peek at the deleted switch
2630 main.log.warn( str( device ) )
2631 result = main.FALSE
2632 if device and device[ 'available' ] is False:
2633 result = main.TRUE
2634 utilities.assert_equals( expect=main.TRUE, actual=result,
2635 onpass="Kill switch successful",
2636 onfail="Failed to kill switch?" )
2637
2638 def CASE12( self, main ):
2639 """
2640 Switch Up
2641 """
2642 # NOTE: You should probably run a topology check after this
2643 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002644 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002645 assert main, "main not defined"
2646 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002647 assert main.CLIs, "main.CLIs not defined"
2648 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002649 assert ONOS1Port, "ONOS1Port not defined"
2650 assert ONOS2Port, "ONOS2Port not defined"
2651 assert ONOS3Port, "ONOS3Port not defined"
2652 assert ONOS4Port, "ONOS4Port not defined"
2653 assert ONOS5Port, "ONOS5Port not defined"
2654 assert ONOS6Port, "ONOS6Port not defined"
2655 assert ONOS7Port, "ONOS7Port not defined"
2656
2657 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2658 switch = main.params[ 'kill' ][ 'switch' ]
2659 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2660 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002661 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002662 description = "Adding a switch to ensure it is discovered correctly"
2663 main.case( description )
2664
2665 main.step( "Add back " + switch )
2666 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2667 for peer in links:
2668 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002669 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002670 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2671 main.log.info( "Waiting " + str( switchSleep ) +
2672 " seconds for switch up to be discovered" )
2673 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002674 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002675 # Peek at the deleted switch
2676 main.log.warn( str( device ) )
2677 result = main.FALSE
2678 if device and device[ 'available' ]:
2679 result = main.TRUE
2680 utilities.assert_equals( expect=main.TRUE, actual=result,
2681 onpass="add switch successful",
2682 onfail="Failed to add switch?" )
2683
2684 def CASE13( self, main ):
2685 """
2686 Clean up
2687 """
2688 import os
2689 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002690 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002691 assert main, "main not defined"
2692 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002693 assert main.CLIs, "main.CLIs not defined"
2694 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002695
2696 # printing colors to terminal
2697 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2698 'blue': '\033[94m', 'green': '\033[92m',
2699 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2700 main.case( "Test Cleanup" )
2701 main.step( "Killing tcpdumps" )
2702 main.Mininet2.stopTcpdump()
2703
2704 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002705 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002706 main.step( "Copying MN pcap and ONOS log files to test station" )
2707 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2708 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002709 # NOTE: MN Pcap file is being saved to logdir.
2710 # We scp this file as MN and TestON aren't necessarily the same vm
2711
2712 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002713 # TODO: Load these from params
2714 # NOTE: must end in /
2715 logFolder = "/opt/onos/log/"
2716 logFiles = [ "karaf.log", "karaf.log.1" ]
2717 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002718 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002719 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002720 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002721 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2722 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002723 # std*.log's
2724 # NOTE: must end in /
2725 logFolder = "/opt/onos/var/"
2726 logFiles = [ "stderr.log", "stdout.log" ]
2727 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002728 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002729 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002730 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002731 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2732 logFolder + f, dstName )
2733 else:
2734 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002735
2736 main.step( "Stopping Mininet" )
2737 mnResult = main.Mininet1.stopNet()
2738 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2739 onpass="Mininet stopped",
2740 onfail="MN cleanup NOT successful" )
2741
2742 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002743 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002744 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2745 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002746
2747 try:
2748 timerLog = open( main.logdir + "/Timers.csv", 'w')
2749            # Overwrite the file with the timer labels and values, then close
2750 labels = "Gossip Intents, Restart"
2751 data = str( gossipTime ) + ", " + str( main.restartTime )
2752 timerLog.write( labels + "\n" + data )
2753 timerLog.close()
2754 except NameError, e:
2755 main.log.exception(e)
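        # NOTE: hedged example of the resulting Timers.csv layout; the numbers
        # below are placeholders, not real measurements:
        #     Gossip Intents, Restart
        #     5.21, 13.98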
2756
2757 def CASE14( self, main ):
2758 """
2759 start election app on all onos nodes
2760 """
Jon Halle1a3b752015-07-22 13:02:46 -07002761 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002762 assert main, "main not defined"
2763 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002764 assert main.CLIs, "main.CLIs not defined"
2765 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002766
2767 main.case("Start Leadership Election app")
2768 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002769 onosCli = main.CLIs[ main.activeNodes[0] ]
2770 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002771 utilities.assert_equals(
2772 expect=main.TRUE,
2773 actual=appResult,
2774 onpass="Election app installed",
2775 onfail="Something went wrong with installing Leadership election" )
2776
2777 main.step( "Run for election on each node" )
2778 leaderResult = main.TRUE
2779 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002780 for i in main.activeNodes:
2781 main.CLIs[i].electionTestRun()
2782 for i in main.activeNodes:
2783 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002784 leader = cli.electionTestLeader()
2785 if leader is None or leader == main.FALSE:
2786 main.log.error( cli.name + ": Leader for the election app " +
2787 "should be an ONOS node, instead got '" +
2788 str( leader ) + "'" )
2789 leaderResult = main.FALSE
2790 leaders.append( leader )
2791 utilities.assert_equals(
2792 expect=main.TRUE,
2793 actual=leaderResult,
2794 onpass="Successfully ran for leadership",
2795 onfail="Failed to run for leadership" )
2796
2797 main.step( "Check that each node shows the same leader" )
2798 sameLeader = main.TRUE
2799 if len( set( leaders ) ) != 1:
2800 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002801            main.log.error( "Results of electionTestLeader in order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002802 str( leaders ) )
2803 utilities.assert_equals(
2804 expect=main.TRUE,
2805 actual=sameLeader,
2806 onpass="Leadership is consistent for the election topic",
2807 onfail="Nodes have different leaders" )
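        # NOTE: illustrative example of the set() check above (placeholder
        # IPs): leaders = [ '10.0.0.1', '10.0.0.1', '10.0.0.2' ] gives
        # len( set( leaders ) ) == 2, which is reported as a leadership
        # mismatch.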
2808
2809 def CASE15( self, main ):
2810 """
2811 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002812 15.1 Run election on each node
2813 15.2 Check that each node has the same leaders and candidates
2814 15.3 Find current leader and withdraw
2815 15.4 Check that a new node was elected leader
2816        15.5 Check that the new leader was the candidate of the old leader
2817 15.6 Run for election on old leader
2818 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2819 15.8 Make sure that the old leader was added to the candidate list
2820
2821        old and new variable prefixes refer to data from before vs. after
2822        the withdrawal, and later to before vs. after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002823 """
2824 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002825 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002826 assert main, "main not defined"
2827 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002828 assert main.CLIs, "main.CLIs not defined"
2829 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002830
Jon Hall5cf14d52015-07-16 12:15:19 -07002831 description = "Check that Leadership Election is still functional"
2832 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002833        # NOTE: Need to re-run since being a candidate is not persistent
2834 # TODO: add check for "Command not found:" in the driver, this
2835 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002836
acsmars71adceb2015-08-31 15:09:26 -07002837        oldLeaders = []  # leaders by node before withdrawal from candidates
2838        newLeaders = []  # leaders by node after withdrawal from candidates
2839        oldAllCandidates = []  # list of lists of each node's candidates before
2840        newAllCandidates = []  # list of lists of each node's candidates after
2841        oldCandidates = []  # list of candidates from node 0 before withdrawal
2842        newCandidates = []  # list of candidates from node 0 after withdrawal
2843        oldLeader = ''  # the old leader from oldLeaders, None if not same
2844        newLeader = ''  # the new leader from newLeaders, None if not same
2845 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2846 expectNoLeader = False # True when there is only one leader
2847 if main.numCtrls == 1:
2848 expectNoLeader = True
2849
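        # NOTE: hedged illustration of the bookkeeping above with three active
        # nodes (placeholder IPs):
        #     oldLeaders = [ '10.0.0.1', '10.0.0.1', '10.0.0.1' ]
        #     oldAllCandidates = [ [ '10.0.0.1', '10.0.0.2', '10.0.0.3' ], ... ]
        # i.e. index 0 of each node's candidate list is that node's view of the
        # current leader.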
2850 main.step( "Run for election on each node" )
2851 electionResult = main.TRUE
2852
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002853 for i in main.activeNodes: # run test election on each node
2854 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002855 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002856 utilities.assert_equals(
2857 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002858 actual=electionResult,
2859 onpass="All nodes successfully ran for leadership",
2860 onfail="At least one node failed to run for leadership" )
2861
acsmars3a72bde2015-09-02 14:16:22 -07002862 if electionResult == main.FALSE:
2863 main.log.error(
2864 "Skipping Test Case because Election Test App isn't loaded" )
2865 main.skipCase()
2866
acsmars71adceb2015-08-31 15:09:26 -07002867 main.step( "Check that each node shows the same leader and candidates" )
2868 sameResult = main.TRUE
2869 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002870 for i in main.activeNodes:
2871 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002872 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2873 oldAllCandidates.append( node )
2874 oldLeaders.append( node[ 0 ] )
2875 oldCandidates = oldAllCandidates[ 0 ]
2876
2877 # Check that each node has the same leader. Defines oldLeader
2878 if len( set( oldLeaders ) ) != 1:
2879 sameResult = main.FALSE
2880 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2881 oldLeader = None
2882 else:
2883 oldLeader = oldLeaders[ 0 ]
2884
2885 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002886 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002887 for candidates in oldAllCandidates:
2888 if set( candidates ) != set( oldCandidates ):
2889 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002890 candidateDiscrepancy = True
2891
2892 if candidateDiscrepancy:
2893 failMessage += " and candidates"
2894
acsmars71adceb2015-08-31 15:09:26 -07002895 utilities.assert_equals(
2896 expect=main.TRUE,
2897 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002898 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002899 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002900
2901 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002902 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002903 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002904 if oldLeader is None:
2905 main.log.error( "Leadership isn't consistent." )
2906 withdrawResult = main.FALSE
2907 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002908 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002909 if oldLeader == main.nodes[ i ].ip_address:
2910 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002911 break
2912 else: # FOR/ELSE statement
2913 main.log.error( "Leader election, could not find current leader" )
2914 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002915 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002916 utilities.assert_equals(
2917 expect=main.TRUE,
2918 actual=withdrawResult,
2919 onpass="Node was withdrawn from election",
2920 onfail="Node was not withdrawn from election" )
2921
acsmars71adceb2015-08-31 15:09:26 -07002922 main.step( "Check that a new node was elected leader" )
2923
Jon Hall5cf14d52015-07-16 12:15:19 -07002924 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002925 newLeaderResult = main.TRUE
2926 failMessage = "Nodes have different leaders"
2927
2928 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002929 for i in main.activeNodes:
2930 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002931 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2932            # elections might not have finished yet
2933 if node[ 0 ] == 'none' and not expectNoLeader:
2934 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2935 "sure elections are complete." )
2936 time.sleep(5)
2937 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2938 # election still isn't done or there is a problem
2939 if node[ 0 ] == 'none':
2940 main.log.error( "No leader was elected on at least 1 node" )
2941 newLeaderResult = main.FALSE
2942 newAllCandidates.append( node )
2943 newLeaders.append( node[ 0 ] )
2944 newCandidates = newAllCandidates[ 0 ]
2945
2946 # Check that each node has the same leader. Defines newLeader
2947 if len( set( newLeaders ) ) != 1:
2948 newLeaderResult = main.FALSE
2949 main.log.error( "Nodes have different leaders: " +
2950 str( newLeaders ) )
2951 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002952 else:
acsmars71adceb2015-08-31 15:09:26 -07002953 newLeader = newLeaders[ 0 ]
2954
2955 # Check that each node's candidate list is the same
2956 for candidates in newAllCandidates:
2957 if set( candidates ) != set( newCandidates ):
2958 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002959 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002960
2961 # Check that the new leader is not the older leader, which was withdrawn
2962 if newLeader == oldLeader:
2963 newLeaderResult = main.FALSE
2964 main.log.error( "All nodes still see old leader: " + oldLeader +
2965 " as the current leader" )
2966
Jon Hall5cf14d52015-07-16 12:15:19 -07002967 utilities.assert_equals(
2968 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002969 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002970 onpass="Leadership election passed",
2971 onfail="Something went wrong with Leadership election" )
2972
acsmars71adceb2015-08-31 15:09:26 -07002973        main.step( "Check that the new leader was the candidate of the old leader" )
2974        # candidates[ 2 ] should become the top candidate after withdrawal
2975 correctCandidateResult = main.TRUE
2976 if expectNoLeader:
2977 if newLeader == 'none':
2978 main.log.info( "No leader expected. None found. Pass" )
2979 correctCandidateResult = main.TRUE
2980 else:
2981 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2982 correctCandidateResult = main.FALSE
2983 elif newLeader != oldCandidates[ 2 ]:
2984 correctCandidateResult = main.FALSE
2985 main.log.error( "Candidate " + newLeader + " was elected. " +
2986 oldCandidates[ 2 ] + " should have had priority." )
2987
2988 utilities.assert_equals(
2989 expect=main.TRUE,
2990 actual=correctCandidateResult,
2991 onpass="Correct Candidate Elected",
2992 onfail="Incorrect Candidate Elected" )
2993
Jon Hall5cf14d52015-07-16 12:15:19 -07002994 main.step( "Run for election on old leader( just so everyone " +
2995 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002996 if oldLeaderCLI is not None:
2997 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002998 else:
acsmars71adceb2015-08-31 15:09:26 -07002999 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003000 runResult = main.FALSE
3001 utilities.assert_equals(
3002 expect=main.TRUE,
3003 actual=runResult,
3004 onpass="App re-ran for election",
3005 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07003006 main.step(
3007 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003008 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003009 positionResult = main.TRUE
3010 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3011
3012 # Reset and reuse the new candidate and leaders lists
3013 newAllCandidates = []
3014 newCandidates = []
3015 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003016 for i in main.activeNodes:
3017 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003018 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3019            if oldLeader not in node: # election might not have finished yet
3020 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3021 "be sure elections are complete" )
3022 time.sleep(5)
3023 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3024 if oldLeader not in node: # election still isn't done, errors
3025 main.log.error(
3026 "Old leader was not elected on at least one node" )
3027 positionResult = main.FALSE
3028 newAllCandidates.append( node )
3029 newLeaders.append( node[ 0 ] )
3030 newCandidates = newAllCandidates[ 0 ]
3031
3032 # Check that each node has the same leader. Defines newLeader
3033 if len( set( newLeaders ) ) != 1:
3034 positionResult = main.FALSE
3035 main.log.error( "Nodes have different leaders: " +
3036 str( newLeaders ) )
3037 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003038 else:
acsmars71adceb2015-08-31 15:09:26 -07003039 newLeader = newLeaders[ 0 ]
3040
3041 # Check that each node's candidate list is the same
3042 for candidates in newAllCandidates:
3043 if set( candidates ) != set( newCandidates ):
3044                positionResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003045 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003046
3047 # Check that the re-elected node is last on the candidate List
3048 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003049 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003050 str( newCandidates ) )
3051 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003052
3053 utilities.assert_equals(
3054 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003055 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003056 onpass="Old leader successfully re-ran for election",
3057 onfail="Something went wrong with Leadership election after " +
3058 "the old leader re-ran for election" )
3059
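    def getElectionLeadersAndCandidates( self, main ):
        """
        NOTE: Illustrative helper sketch only; it is not invoked by any test
        case. It assumes the same main.CLIs / main.activeNodes structures used
        above and shows how the repeated leader/candidate collection in CASE15
        could be factored out.
        """
        leaders = []  # each node's view of the current leader
        allCandidates = []  # each node's full candidate list
        for i in main.activeNodes:
            node = main.CLIs[ i ].specificLeaderCandidate(
                'org.onosproject.election' )
            allCandidates.append( node )
            leaders.append( node[ 0 ] )  # index 0 is the leader
        # A healthy cluster has exactly one leader: len( set( leaders ) ) == 1
        return leaders, allCandidates
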
3060 def CASE16( self, main ):
3061 """
3062 Install Distributed Primitives app
3063 """
3064 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003065 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003066 assert main, "main not defined"
3067 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003068 assert main.CLIs, "main.CLIs not defined"
3069 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003070
3071 # Variables for the distributed primitives tests
3072 global pCounterName
3073 global iCounterName
3074 global pCounterValue
3075 global iCounterValue
3076 global onosSet
3077 global onosSetName
3078 pCounterName = "TestON-Partitions"
3079 iCounterName = "TestON-inMemory"
3080 pCounterValue = 0
3081 iCounterValue = 0
3082 onosSet = set([])
3083 onosSetName = "TestON-set"
3084
3085 description = "Install Primitives app"
3086 main.case( description )
3087 main.step( "Install Primitives app" )
3088 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003089 node = main.activeNodes[0]
3090 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003091 utilities.assert_equals( expect=main.TRUE,
3092 actual=appResults,
3093 onpass="Primitives app activated",
3094 onfail="Primitives app not activated" )
3095 time.sleep( 5 ) # To allow all nodes to activate
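        # NOTE: descriptive comment: the global pCounterValue / iCounterValue
        # set above track the values this test expects the cluster counters to
        # hold, so CASE17 can compare ONOS responses against them.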
3096
3097 def CASE17( self, main ):
3098 """
3099 Check for basic functionality with distributed primitives
3100 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003101 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003102 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003103 assert main, "main not defined"
3104 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003105 assert main.CLIs, "main.CLIs not defined"
3106 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003107 assert pCounterName, "pCounterName not defined"
3108 assert iCounterName, "iCounterName not defined"
3109 assert onosSetName, "onosSetName not defined"
3110 # NOTE: assert fails if value is 0/None/Empty/False
3111 try:
3112 pCounterValue
3113 except NameError:
3114 main.log.error( "pCounterValue not defined, setting to 0" )
3115 pCounterValue = 0
3116 try:
3117 iCounterValue
3118 except NameError:
3119 main.log.error( "iCounterValue not defined, setting to 0" )
3120 iCounterValue = 0
3121 try:
3122 onosSet
3123 except NameError:
3124 main.log.error( "onosSet not defined, setting to empty Set" )
3125 onosSet = set([])
3126 # Variables for the distributed primitives tests. These are local only
3127 addValue = "a"
3128 addAllValue = "a b c d e f"
3129 retainValue = "c d e f"
3130
3131 description = "Check for basic functionality with distributed " +\
3132 "primitives"
3133 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003134 main.caseExplanation = "Test the methods of the distributed " +\
3135 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003136 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003137 # Partitioned counters
3138 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003139 pCounters = []
3140 threads = []
3141 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003142 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003143 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3144 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003145 args=[ pCounterName ] )
3146 pCounterValue += 1
3147 addedPValues.append( pCounterValue )
3148 threads.append( t )
3149 t.start()
3150
3151 for t in threads:
3152 t.join()
3153 pCounters.append( t.result )
3154 # Check that counter incremented numController times
3155 pCounterResults = True
3156 for i in addedPValues:
3157 tmpResult = i in pCounters
3158 pCounterResults = pCounterResults and tmpResult
3159 if not tmpResult:
3160 main.log.error( str( i ) + " is not in partitioned "
3161 "counter incremented results" )
3162 utilities.assert_equals( expect=True,
3163 actual=pCounterResults,
3164 onpass="Default counter incremented",
3165 onfail="Error incrementing default" +
3166 " counter" )
3167
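        # NOTE: worked example (illustrative): starting from 0, if three nodes
        # each perform the add-and-get above (tracked locally as +1 per call),
        # the returned values should be some ordering of [ 1, 2, 3 ]; the
        # addedPValues membership check verifies exactly that.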
Jon Halle1a3b752015-07-22 13:02:46 -07003168 main.step( "Get then Increment a default counter on each node" )
3169 pCounters = []
3170 threads = []
3171 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003172 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003173 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3174 name="counterGetAndAdd-" + str( i ),
3175 args=[ pCounterName ] )
3176 addedPValues.append( pCounterValue )
3177 pCounterValue += 1
3178 threads.append( t )
3179 t.start()
3180
3181 for t in threads:
3182 t.join()
3183 pCounters.append( t.result )
3184 # Check that counter incremented numController times
3185 pCounterResults = True
3186 for i in addedPValues:
3187 tmpResult = i in pCounters
3188 pCounterResults = pCounterResults and tmpResult
3189 if not tmpResult:
3190 main.log.error( str( i ) + " is not in partitioned "
3191 "counter incremented results" )
3192 utilities.assert_equals( expect=True,
3193 actual=pCounterResults,
3194 onpass="Default counter incremented",
3195 onfail="Error incrementing default" +
3196 " counter" )
3197
3198 main.step( "Counters we added have the correct values" )
3199 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3200 utilities.assert_equals( expect=main.TRUE,
3201 actual=incrementCheck,
3202 onpass="Added counters are correct",
3203 onfail="Added counters are incorrect" )
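        # NOTE: hedged assumption: main.Counters.counterCheck() comes from the
        # separately loaded Counters helper module; it is expected to read the
        # named counter from the cluster and compare it with the locally
        # tracked value passed in here.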
3204
3205 main.step( "Add -8 to then get a default counter on each node" )
3206 pCounters = []
3207 threads = []
3208 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003209 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003210 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3211 name="counterIncrement-" + str( i ),
3212 args=[ pCounterName ],
3213 kwargs={ "delta": -8 } )
3214 pCounterValue += -8
3215 addedPValues.append( pCounterValue )
3216 threads.append( t )
3217 t.start()
3218
3219 for t in threads:
3220 t.join()
3221 pCounters.append( t.result )
3222 # Check that counter incremented numController times
3223 pCounterResults = True
3224 for i in addedPValues:
3225 tmpResult = i in pCounters
3226 pCounterResults = pCounterResults and tmpResult
3227 if not tmpResult:
3228 main.log.error( str( i ) + " is not in partitioned "
3229 "counter incremented results" )
3230 utilities.assert_equals( expect=True,
3231 actual=pCounterResults,
3232 onpass="Default counter incremented",
3233 onfail="Error incrementing default" +
3234 " counter" )
3235
3236 main.step( "Add 5 to then get a default counter on each node" )
3237 pCounters = []
3238 threads = []
3239 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003240 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003241 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3242 name="counterIncrement-" + str( i ),
3243 args=[ pCounterName ],
3244 kwargs={ "delta": 5 } )
3245 pCounterValue += 5
3246 addedPValues.append( pCounterValue )
3247 threads.append( t )
3248 t.start()
3249
3250 for t in threads:
3251 t.join()
3252 pCounters.append( t.result )
3253 # Check that counter incremented numController times
3254 pCounterResults = True
3255 for i in addedPValues:
3256 tmpResult = i in pCounters
3257 pCounterResults = pCounterResults and tmpResult
3258 if not tmpResult:
3259 main.log.error( str( i ) + " is not in partitioned "
3260 "counter incremented results" )
3261 utilities.assert_equals( expect=True,
3262 actual=pCounterResults,
3263 onpass="Default counter incremented",
3264 onfail="Error incrementing default" +
3265 " counter" )
3266
3267 main.step( "Get then add 5 to a default counter on each node" )
3268 pCounters = []
3269 threads = []
3270 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003271 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003272 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3273 name="counterIncrement-" + str( i ),
3274 args=[ pCounterName ],
3275 kwargs={ "delta": 5 } )
3276 addedPValues.append( pCounterValue )
3277 pCounterValue += 5
3278 threads.append( t )
3279 t.start()
3280
3281 for t in threads:
3282 t.join()
3283 pCounters.append( t.result )
3284 # Check that counter incremented numController times
3285 pCounterResults = True
3286 for i in addedPValues:
3287 tmpResult = i in pCounters
3288 pCounterResults = pCounterResults and tmpResult
3289 if not tmpResult:
3290 main.log.error( str( i ) + " is not in partitioned "
3291 "counter incremented results" )
3292 utilities.assert_equals( expect=True,
3293 actual=pCounterResults,
3294 onpass="Default counter incremented",
3295 onfail="Error incrementing default" +
3296 " counter" )
3297
3298 main.step( "Counters we added have the correct values" )
3299 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3300 utilities.assert_equals( expect=main.TRUE,
3301 actual=incrementCheck,
3302 onpass="Added counters are correct",
3303 onfail="Added counters are incorrect" )
3304
3305 # In-Memory counters
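        # NOTE: the in-memory steps below mirror the partitioned-counter steps
        # above; the only difference is the inMemory=True kwarg passed to the
        # counter commands, while the expected-value bookkeeping is identical.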
3306 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003307 iCounters = []
3308 addedIValues = []
3309 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003310 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003311 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003312 name="icounterIncrement-" + str( i ),
3313 args=[ iCounterName ],
3314 kwargs={ "inMemory": True } )
3315 iCounterValue += 1
3316 addedIValues.append( iCounterValue )
3317 threads.append( t )
3318 t.start()
3319
3320 for t in threads:
3321 t.join()
3322 iCounters.append( t.result )
3323 # Check that counter incremented numController times
3324 iCounterResults = True
3325 for i in addedIValues:
3326 tmpResult = i in iCounters
3327 iCounterResults = iCounterResults and tmpResult
3328 if not tmpResult:
3329 main.log.error( str( i ) + " is not in the in-memory "
3330 "counter incremented results" )
3331 utilities.assert_equals( expect=True,
3332 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003333 onpass="In-memory counter incremented",
3334 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003335 " counter" )
3336
Jon Halle1a3b752015-07-22 13:02:46 -07003337        main.step( "Get then Increment an in-memory counter on each node" )
3338 iCounters = []
3339 threads = []
3340 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003341 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003342 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3343 name="counterGetAndAdd-" + str( i ),
3344 args=[ iCounterName ],
3345 kwargs={ "inMemory": True } )
3346 addedIValues.append( iCounterValue )
3347 iCounterValue += 1
3348 threads.append( t )
3349 t.start()
3350
3351 for t in threads:
3352 t.join()
3353 iCounters.append( t.result )
3354 # Check that counter incremented numController times
3355 iCounterResults = True
3356 for i in addedIValues:
3357 tmpResult = i in iCounters
3358 iCounterResults = iCounterResults and tmpResult
3359 if not tmpResult:
3360 main.log.error( str( i ) + " is not in in-memory "
3361 "counter incremented results" )
3362 utilities.assert_equals( expect=True,
3363 actual=iCounterResults,
3364 onpass="In-memory counter incremented",
3365 onfail="Error incrementing in-memory" +
3366 " counter" )
3367
3368 main.step( "Counters we added have the correct values" )
3369 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3370 utilities.assert_equals( expect=main.TRUE,
3371 actual=incrementCheck,
3372 onpass="Added counters are correct",
3373 onfail="Added counters are incorrect" )
3374
3375 main.step( "Add -8 to then get a in-memory counter on each node" )
3376 iCounters = []
3377 threads = []
3378 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003379 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003380 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3381 name="counterIncrement-" + str( i ),
3382 args=[ iCounterName ],
3383 kwargs={ "delta": -8, "inMemory": True } )
3384 iCounterValue += -8
3385 addedIValues.append( iCounterValue )
3386 threads.append( t )
3387 t.start()
3388
3389 for t in threads:
3390 t.join()
3391 iCounters.append( t.result )
3392 # Check that counter incremented numController times
3393 iCounterResults = True
3394 for i in addedIValues:
3395 tmpResult = i in iCounters
3396 iCounterResults = iCounterResults and tmpResult
3397 if not tmpResult:
3398 main.log.error( str( i ) + " is not in in-memory "
3399 "counter incremented results" )
3400 utilities.assert_equals( expect=True,
3401                                 actual=iCounterResults,
3402 onpass="In-memory counter incremented",
3403 onfail="Error incrementing in-memory" +
3404 " counter" )
3405
3406 main.step( "Add 5 to then get a in-memory counter on each node" )
3407 iCounters = []
3408 threads = []
3409 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003410 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003411 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3412 name="counterIncrement-" + str( i ),
3413 args=[ iCounterName ],
3414 kwargs={ "delta": 5, "inMemory": True } )
3415 iCounterValue += 5
3416 addedIValues.append( iCounterValue )
3417 threads.append( t )
3418 t.start()
3419
3420 for t in threads:
3421 t.join()
3422 iCounters.append( t.result )
3423 # Check that counter incremented numController times
3424 iCounterResults = True
3425 for i in addedIValues:
3426 tmpResult = i in iCounters
3427 iCounterResults = iCounterResults and tmpResult
3428 if not tmpResult:
3429 main.log.error( str( i ) + " is not in in-memory "
3430 "counter incremented results" )
3431 utilities.assert_equals( expect=True,
3432                                 actual=iCounterResults,
3433 onpass="In-memory counter incremented",
3434 onfail="Error incrementing in-memory" +
3435 " counter" )
3436
3437 main.step( "Get then add 5 to a in-memory counter on each node" )
3438 iCounters = []
3439 threads = []
3440 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003441 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003442 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3443 name="counterIncrement-" + str( i ),
3444 args=[ iCounterName ],
3445 kwargs={ "delta": 5, "inMemory": True } )
3446 addedIValues.append( iCounterValue )
3447 iCounterValue += 5
3448 threads.append( t )
3449 t.start()
3450
3451 for t in threads:
3452 t.join()
3453 iCounters.append( t.result )
3454 # Check that counter incremented numController times
3455 iCounterResults = True
3456 for i in addedIValues:
3457 tmpResult = i in iCounters
3458 iCounterResults = iCounterResults and tmpResult
3459 if not tmpResult:
3460 main.log.error( str( i ) + " is not in in-memory "
3461 "counter incremented results" )
3462 utilities.assert_equals( expect=True,
3463 actual=iCounterResults,
3464 onpass="In-memory counter incremented",
3465 onfail="Error incrementing in-memory" +
3466 " counter" )
3467
3468 main.step( "Counters we added have the correct values" )
3469 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3470 utilities.assert_equals( expect=main.TRUE,
3471 actual=incrementCheck,
3472 onpass="Added counters are correct",
3473 onfail="Added counters are incorrect" )
3474
Jon Hall5cf14d52015-07-16 12:15:19 -07003475        main.step( "Check counters are consistent across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003476 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003477 utilities.assert_equals( expect=main.TRUE,
3478 actual=consistentCounterResults,
3479 onpass="ONOS counters are consistent " +
3480 "across nodes",
3481 onfail="ONOS Counters are inconsistent " +
3482 "across nodes" )
3483
3484 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003485        incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3486        incrementCheck = incrementCheck and \
3487                         main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003488 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003489 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003490 onpass="Added counters are correct",
3491 onfail="Added counters are incorrect" )
3492 # DISTRIBUTED SETS
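        # NOTE: hedged overview of the pattern used for every set step below:
        # the local Python set onosSet is mutated first (add/update/remove/
        # clear), then the same operation is issued to every active node and
        # each node's view is compared against onosSet for both contents and
        # size. For example, after onosSet.add( "a" ) every node should report
        # the element "a" and a size of 1.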
3493 main.step( "Distributed Set get" )
3494 size = len( onosSet )
3495 getResponses = []
3496 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003497 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003498 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003499 name="setTestGet-" + str( i ),
3500 args=[ onosSetName ] )
3501 threads.append( t )
3502 t.start()
3503 for t in threads:
3504 t.join()
3505 getResponses.append( t.result )
3506
3507 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003508 for i in range( len( main.activeNodes ) ):
3509 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003510 if isinstance( getResponses[ i ], list):
3511 current = set( getResponses[ i ] )
3512 if len( current ) == len( getResponses[ i ] ):
3513 # no repeats
3514 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003515 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003516 " has incorrect view" +
3517 " of set " + onosSetName + ":\n" +
3518 str( getResponses[ i ] ) )
3519 main.log.debug( "Expected: " + str( onosSet ) )
3520 main.log.debug( "Actual: " + str( current ) )
3521 getResults = main.FALSE
3522 else:
3523 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003524 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003525 " has repeat elements in" +
3526 " set " + onosSetName + ":\n" +
3527 str( getResponses[ i ] ) )
3528 getResults = main.FALSE
3529 elif getResponses[ i ] == main.ERROR:
3530 getResults = main.FALSE
3531 utilities.assert_equals( expect=main.TRUE,
3532 actual=getResults,
3533 onpass="Set elements are correct",
3534 onfail="Set elements are incorrect" )
3535
3536 main.step( "Distributed Set size" )
3537 sizeResponses = []
3538 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003539 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003540 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003541 name="setTestSize-" + str( i ),
3542 args=[ onosSetName ] )
3543 threads.append( t )
3544 t.start()
3545 for t in threads:
3546 t.join()
3547 sizeResponses.append( t.result )
3548
3549 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003550 for i in range( len( main.activeNodes ) ):
3551 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003552 if size != sizeResponses[ i ]:
3553 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003554 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003555 " expected a size of " + str( size ) +
3556 " for set " + onosSetName +
3557 " but got " + str( sizeResponses[ i ] ) )
3558 utilities.assert_equals( expect=main.TRUE,
3559 actual=sizeResults,
3560 onpass="Set sizes are correct",
3561 onfail="Set sizes are incorrect" )
3562
3563 main.step( "Distributed Set add()" )
3564 onosSet.add( addValue )
3565 addResponses = []
3566 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003567 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003568 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003569 name="setTestAdd-" + str( i ),
3570 args=[ onosSetName, addValue ] )
3571 threads.append( t )
3572 t.start()
3573 for t in threads:
3574 t.join()
3575 addResponses.append( t.result )
3576
3577 # main.TRUE = successfully changed the set
3578 # main.FALSE = action resulted in no change in set
3579 # main.ERROR - Some error in executing the function
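        # e.g. addResponses = [ main.TRUE, main.FALSE, main.TRUE ] still passes
        # the check below, since main.FALSE only means the value was already
        # present on that node.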
3580 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003581 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003582 if addResponses[ i ] == main.TRUE:
3583 # All is well
3584 pass
3585 elif addResponses[ i ] == main.FALSE:
3586 # Already in set, probably fine
3587 pass
3588 elif addResponses[ i ] == main.ERROR:
3589 # Error in execution
3590 addResults = main.FALSE
3591 else:
3592 # unexpected result
3593 addResults = main.FALSE
3594 if addResults != main.TRUE:
3595 main.log.error( "Error executing set add" )
3596
3597 # Check if set is still correct
3598 size = len( onosSet )
3599 getResponses = []
3600 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003601 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003602 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003603 name="setTestGet-" + str( i ),
3604 args=[ onosSetName ] )
3605 threads.append( t )
3606 t.start()
3607 for t in threads:
3608 t.join()
3609 getResponses.append( t.result )
3610 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003611 for i in range( len( main.activeNodes ) ):
3612 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003613 if isinstance( getResponses[ i ], list):
3614 current = set( getResponses[ i ] )
3615 if len( current ) == len( getResponses[ i ] ):
3616 # no repeats
3617 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003618 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003619 " of set " + onosSetName + ":\n" +
3620 str( getResponses[ i ] ) )
3621 main.log.debug( "Expected: " + str( onosSet ) )
3622 main.log.debug( "Actual: " + str( current ) )
3623 getResults = main.FALSE
3624 else:
3625 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003626 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003627 " set " + onosSetName + ":\n" +
3628 str( getResponses[ i ] ) )
3629 getResults = main.FALSE
3630 elif getResponses[ i ] == main.ERROR:
3631 getResults = main.FALSE
3632 sizeResponses = []
3633 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003634 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003635 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003636 name="setTestSize-" + str( i ),
3637 args=[ onosSetName ] )
3638 threads.append( t )
3639 t.start()
3640 for t in threads:
3641 t.join()
3642 sizeResponses.append( t.result )
3643 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003644 for i in range( len( main.activeNodes ) ):
3645 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003646 if size != sizeResponses[ i ]:
3647 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003648 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003649 " expected a size of " + str( size ) +
3650 " for set " + onosSetName +
3651 " but got " + str( sizeResponses[ i ] ) )
3652 addResults = addResults and getResults and sizeResults
3653 utilities.assert_equals( expect=main.TRUE,
3654 actual=addResults,
3655 onpass="Set add correct",
3656 onfail="Set add was incorrect" )
3657
3658 main.step( "Distributed Set addAll()" )
3659 onosSet.update( addAllValue.split() )
3660 addResponses = []
3661 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003662 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003663 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003664 name="setTestAddAll-" + str( i ),
3665 args=[ onosSetName, addAllValue ] )
3666 threads.append( t )
3667 t.start()
3668 for t in threads:
3669 t.join()
3670 addResponses.append( t.result )
3671
3672 # main.TRUE = successfully changed the set
3673 # main.FALSE = action resulted in no change in set
3674 # main.ERROR - Some error in executing the function
3675 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003676 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003677 if addResponses[ i ] == main.TRUE:
3678 # All is well
3679 pass
3680 elif addResponses[ i ] == main.FALSE:
3681 # Already in set, probably fine
3682 pass
3683 elif addResponses[ i ] == main.ERROR:
3684 # Error in execution
3685 addAllResults = main.FALSE
3686 else:
3687 # unexpected result
3688 addAllResults = main.FALSE
3689 if addAllResults != main.TRUE:
3690 main.log.error( "Error executing set addAll" )
3691
3692 # Check if set is still correct
3693 size = len( onosSet )
3694 getResponses = []
3695 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003696 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003697 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003698 name="setTestGet-" + str( i ),
3699 args=[ onosSetName ] )
3700 threads.append( t )
3701 t.start()
3702 for t in threads:
3703 t.join()
3704 getResponses.append( t.result )
3705 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003706 for i in range( len( main.activeNodes ) ):
3707 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003708 if isinstance( getResponses[ i ], list):
3709 current = set( getResponses[ i ] )
3710 if len( current ) == len( getResponses[ i ] ):
3711 # no repeats
3712 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003713 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003714 " has incorrect view" +
3715 " of set " + onosSetName + ":\n" +
3716 str( getResponses[ i ] ) )
3717 main.log.debug( "Expected: " + str( onosSet ) )
3718 main.log.debug( "Actual: " + str( current ) )
3719 getResults = main.FALSE
3720 else:
3721 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003722 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003723 " has repeat elements in" +
3724 " set " + onosSetName + ":\n" +
3725 str( getResponses[ i ] ) )
3726 getResults = main.FALSE
3727 elif getResponses[ i ] == main.ERROR:
3728 getResults = main.FALSE
3729 sizeResponses = []
3730 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003731 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003732 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003733 name="setTestSize-" + str( i ),
3734 args=[ onosSetName ] )
3735 threads.append( t )
3736 t.start()
3737 for t in threads:
3738 t.join()
3739 sizeResponses.append( t.result )
3740 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003741 for i in range( len( main.activeNodes ) ):
3742 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003743 if size != sizeResponses[ i ]:
3744 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003745 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003746 " expected a size of " + str( size ) +
3747 " for set " + onosSetName +
3748 " but got " + str( sizeResponses[ i ] ) )
3749 addAllResults = addAllResults and getResults and sizeResults
3750 utilities.assert_equals( expect=main.TRUE,
3751 actual=addAllResults,
3752 onpass="Set addAll correct",
3753 onfail="Set addAll was incorrect" )
3754
3755 main.step( "Distributed Set contains()" )
3756 containsResponses = []
3757 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003758 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003759 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003760 name="setContains-" + str( i ),
3761 args=[ onosSetName ],
3762 kwargs={ "values": addValue } )
3763 threads.append( t )
3764 t.start()
3765 for t in threads:
3766 t.join()
3767            # NOTE: t.result is a tuple; index 1 holds the boolean "contains" result
3768 containsResponses.append( t.result )
3769
3770 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003771 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003772 if containsResponses[ i ] == main.ERROR:
3773 containsResults = main.FALSE
3774 else:
3775 containsResults = containsResults and\
3776 containsResponses[ i ][ 1 ]
3777 utilities.assert_equals( expect=main.TRUE,
3778 actual=containsResults,
3779 onpass="Set contains is functional",
3780 onfail="Set contains failed" )
3781
3782 main.step( "Distributed Set containsAll()" )
3783 containsAllResponses = []
3784 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003785 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003786 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003787 name="setContainsAll-" + str( i ),
3788 args=[ onosSetName ],
3789 kwargs={ "values": addAllValue } )
3790 threads.append( t )
3791 t.start()
3792 for t in threads:
3793 t.join()
3794            # NOTE: t.result is a tuple; index 1 holds the boolean "containsAll" result
3795 containsAllResponses.append( t.result )
3796
3797 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003798 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003799            if containsAllResponses[ i ] == main.ERROR:
3800                containsAllResults = main.FALSE
3801            else:
3802                containsAllResults = containsAllResults and\
3803                                     containsAllResponses[ i ][ 1 ]
3804 utilities.assert_equals( expect=main.TRUE,
3805 actual=containsAllResults,
3806 onpass="Set containsAll is functional",
3807 onfail="Set containsAll failed" )
3808
3809 main.step( "Distributed Set remove()" )
3810 onosSet.remove( addValue )
3811 removeResponses = []
3812 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003813 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003814 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003815 name="setTestRemove-" + str( i ),
3816 args=[ onosSetName, addValue ] )
3817 threads.append( t )
3818 t.start()
3819 for t in threads:
3820 t.join()
3821 removeResponses.append( t.result )
3822
3823 # main.TRUE = successfully changed the set
3824 # main.FALSE = action resulted in no change in set
3825 # main.ERROR - Some error in executing the function
3826 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003827 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003828 if removeResponses[ i ] == main.TRUE:
3829 # All is well
3830 pass
3831 elif removeResponses[ i ] == main.FALSE:
3832 # not in set, probably fine
3833 pass
3834 elif removeResponses[ i ] == main.ERROR:
3835 # Error in execution
3836 removeResults = main.FALSE
3837 else:
3838 # unexpected result
3839 removeResults = main.FALSE
3840 if removeResults != main.TRUE:
3841 main.log.error( "Error executing set remove" )
3842
3843 # Check if set is still correct
3844 size = len( onosSet )
3845 getResponses = []
3846 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003847 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003848 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003849 name="setTestGet-" + str( i ),
3850 args=[ onosSetName ] )
3851 threads.append( t )
3852 t.start()
3853 for t in threads:
3854 t.join()
3855 getResponses.append( t.result )
3856 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003857 for i in range( len( main.activeNodes ) ):
3858 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003859 if isinstance( getResponses[ i ], list):
3860 current = set( getResponses[ i ] )
3861 if len( current ) == len( getResponses[ i ] ):
3862 # no repeats
3863 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003864 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003865 " has incorrect view" +
3866 " of set " + onosSetName + ":\n" +
3867 str( getResponses[ i ] ) )
3868 main.log.debug( "Expected: " + str( onosSet ) )
3869 main.log.debug( "Actual: " + str( current ) )
3870 getResults = main.FALSE
3871 else:
3872 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003873 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003874 " has repeat elements in" +
3875 " set " + onosSetName + ":\n" +
3876 str( getResponses[ i ] ) )
3877 getResults = main.FALSE
3878 elif getResponses[ i ] == main.ERROR:
3879 getResults = main.FALSE
3880 sizeResponses = []
3881 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003882 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003883 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003884 name="setTestSize-" + str( i ),
3885 args=[ onosSetName ] )
3886 threads.append( t )
3887 t.start()
3888 for t in threads:
3889 t.join()
3890 sizeResponses.append( t.result )
3891 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003892 for i in range( len( main.activeNodes ) ):
3893 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003894 if size != sizeResponses[ i ]:
3895 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003896 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003897 " expected a size of " + str( size ) +
3898 " for set " + onosSetName +
3899 " but got " + str( sizeResponses[ i ] ) )
3900 removeResults = removeResults and getResults and sizeResults
3901 utilities.assert_equals( expect=main.TRUE,
3902 actual=removeResults,
3903 onpass="Set remove correct",
3904 onfail="Set remove was incorrect" )
3905
3906 main.step( "Distributed Set removeAll()" )
3907 onosSet.difference_update( addAllValue.split() )
3908 removeAllResponses = []
3909 threads = []
3910 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003911 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003912 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003913 name="setTestRemoveAll-" + str( i ),
3914 args=[ onosSetName, addAllValue ] )
3915 threads.append( t )
3916 t.start()
3917 for t in threads:
3918 t.join()
3919 removeAllResponses.append( t.result )
3920 except Exception, e:
3921 main.log.exception(e)
3922
3923 # main.TRUE = successfully changed the set
3924 # main.FALSE = action resulted in no change in set
3925 # main.ERROR - Some error in executing the function
3926 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003927 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003928 if removeAllResponses[ i ] == main.TRUE:
3929 # All is well
3930 pass
3931 elif removeAllResponses[ i ] == main.FALSE:
3932 # not in set, probably fine
3933 pass
3934 elif removeAllResponses[ i ] == main.ERROR:
3935 # Error in execution
3936 removeAllResults = main.FALSE
3937 else:
3938 # unexpected result
3939 removeAllResults = main.FALSE
3940 if removeAllResults != main.TRUE:
3941 main.log.error( "Error executing set removeAll" )
3942
3943 # Check if set is still correct
3944 size = len( onosSet )
3945 getResponses = []
3946 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003947 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003948 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003949 name="setTestGet-" + str( i ),
3950 args=[ onosSetName ] )
3951 threads.append( t )
3952 t.start()
3953 for t in threads:
3954 t.join()
3955 getResponses.append( t.result )
3956 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003957 for i in range( len( main.activeNodes ) ):
3958 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003959 if isinstance( getResponses[ i ], list):
3960 current = set( getResponses[ i ] )
3961 if len( current ) == len( getResponses[ i ] ):
3962 # no repeats
3963 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003964 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003965 " has incorrect view" +
3966 " of set " + onosSetName + ":\n" +
3967 str( getResponses[ i ] ) )
3968 main.log.debug( "Expected: " + str( onosSet ) )
3969 main.log.debug( "Actual: " + str( current ) )
3970 getResults = main.FALSE
3971 else:
3972 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003973 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003974 " has repeat elements in" +
3975 " set " + onosSetName + ":\n" +
3976 str( getResponses[ i ] ) )
3977 getResults = main.FALSE
3978 elif getResponses[ i ] == main.ERROR:
3979 getResults = main.FALSE
3980 sizeResponses = []
3981 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003982 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003983 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003984 name="setTestSize-" + str( i ),
3985 args=[ onosSetName ] )
3986 threads.append( t )
3987 t.start()
3988 for t in threads:
3989 t.join()
3990 sizeResponses.append( t.result )
3991 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003992 for i in range( len( main.activeNodes ) ):
3993 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003994 if size != sizeResponses[ i ]:
3995 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003996 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003997 " expected a size of " + str( size ) +
3998 " for set " + onosSetName +
3999 " but got " + str( sizeResponses[ i ] ) )
4000 removeAllResults = removeAllResults and getResults and sizeResults
4001 utilities.assert_equals( expect=main.TRUE,
4002 actual=removeAllResults,
4003 onpass="Set removeAll correct",
4004 onfail="Set removeAll was incorrect" )
4005
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

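        # clear: setTestRemove is reused with clear=True to empty the set on
        # every active node; the value argument is ignored in this mode.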
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " " ],  # value doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

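        # addAll again: the set was just cleared, so repopulate it here to
        # give the retain() step below something to filter.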
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

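        # retain: keep only the elements that are also in retainValue,
        # mirroring the local intersection_update() on onosSet.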
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR = some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

        # Transactional maps
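        # put: a single node writes numKeys entries with the same value; the
        # get steps then read each key back from every active node and check
        # that the stored values match.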
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

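        # get: for each key, read the value from every active node in
        # parallel and flag any node whose copy differs from what was put.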
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )

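        # Repeat the put/get checks against the in-memory transactional map
        # ( inMemory=True ) instead of the partitioned one.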
        main.step( "In-memory Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
        if len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="In-Memory Transactional Map put successful",
                                 onfail="In-Memory Transactional Map put values are incorrect" )

        main.step( "In-Memory Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )