blob: c1f979a4b22299c3c05c1a898f2a8338ae352eb9 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAkillNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
    def __init__( self ):
        """Initialize the test class; TestON requires this constructor."""
        # Placeholder attribute expected by the TestON framework; no other
        # per-instance state is kept here.
        self.default = ''
32
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import time
        import pexpect
        main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."
        # TODO: save all the timers and output them for plotting

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cap the requested cluster size at what the bench machine supports
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        # Load the shared Counters helper module used by later cases
        try:
            fileName = "Counters"
            # TODO: Maybe make a library folder somewhere?
            path = main.params[ 'imports' ][ 'path' ]
            main.Counters = imp.load_source( fileName,
                                             path + fileName + ".py" )
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect the CLI and node driver handles that TestON created for
        # this run; stop at the first index that does not exist.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )

        main.step( "Make sure ONOS service doesn't automatically respawn" )
        # Comment out the "respawn" directive so that killed nodes stay dead
        # for the failure cases; reverted at the end of this case.
        handle = main.ONOSbench.handle
        handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
        handle.expect( "\$" )  # $ from the command
        handle.expect( "\$" )  # $ from the prompt

        # GRAPHS
        # NOTE: important params here:
        # job = name of Jenkins job
        # Plot Name = Plot-HA, only can be used if multiple plots
        # index = The number of the graph under plot name
        job = "HAkillNodes"
        plotName = "Plot-HA"
        index = "1"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        main.step( "Creating ONOS package" )
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
        # Poll each node's status, retrying the whole cluster check once
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # NOTE(review): other steps in this case use main.step( ... );
        # main.log.step here may bypass step registration - confirm intent.
        main.log.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        # Start one CLI session per controller in parallel
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "App Ids check" )
        # Give apps time to finish activating before comparing IDs
        time.sleep(60)
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            # Dump app state from one node to help debug the mismatch
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Clean up ONOS service changes" )
        # Restore the onos.conf respawn directive edited above
        handle.sendline( "git checkout -- tools/package/init/onos.conf" )
        handle.expect( "\$" )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()
313
314 def CASE2( self, main ):
315 """
316 Assign devices to controllers
317 """
318 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700319 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700320 assert main, "main not defined"
321 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700322 assert main.CLIs, "main.CLIs not defined"
323 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700324 assert ONOS1Port, "ONOS1Port not defined"
325 assert ONOS2Port, "ONOS2Port not defined"
326 assert ONOS3Port, "ONOS3Port not defined"
327 assert ONOS4Port, "ONOS4Port not defined"
328 assert ONOS5Port, "ONOS5Port not defined"
329 assert ONOS6Port, "ONOS6Port not defined"
330 assert ONOS7Port, "ONOS7Port not defined"
331
332 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700333 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700334 "and check that an ONOS node becomes the " +\
335 "master of the device."
336 main.step( "Assign switches to controllers" )
337
338 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700339 for i in range( main.numCtrls ):
340 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700341 swList = []
342 for i in range( 1, 29 ):
343 swList.append( "s" + str( i ) )
344 main.Mininet1.assignSwController( sw=swList, ip=ipList )
345
346 mastershipCheck = main.TRUE
347 for i in range( 1, 29 ):
348 response = main.Mininet1.getSwController( "s" + str( i ) )
349 try:
350 main.log.info( str( response ) )
351 except Exception:
352 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700353 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700354 if re.search( "tcp:" + node.ip_address, response ):
355 mastershipCheck = mastershipCheck and main.TRUE
356 else:
357 main.log.error( "Error, node " + node.ip_address + " is " +
358 "not in the list of controllers s" +
359 str( i ) + " is connecting to." )
360 mastershipCheck = main.FALSE
361 utilities.assert_equals(
362 expect=main.TRUE,
363 actual=mastershipCheck,
364 onpass="Switch mastership assigned correctly",
365 onfail="Switches not assigned correctly to controllers" )
366
367 def CASE21( self, main ):
368 """
369 Assign mastership to controllers
370 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700371 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700372 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700373 assert main, "main not defined"
374 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700375 assert main.CLIs, "main.CLIs not defined"
376 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700377 assert ONOS1Port, "ONOS1Port not defined"
378 assert ONOS2Port, "ONOS2Port not defined"
379 assert ONOS3Port, "ONOS3Port not defined"
380 assert ONOS4Port, "ONOS4Port not defined"
381 assert ONOS5Port, "ONOS5Port not defined"
382 assert ONOS6Port, "ONOS6Port not defined"
383 assert ONOS7Port, "ONOS7Port not defined"
384
385 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700386 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700387 "device. Then manually assign" +\
388 " mastership to specific ONOS nodes using" +\
389 " 'device-role'"
390 main.step( "Assign mastership of switches to specific controllers" )
391 # Manually assign mastership to the controller we want
392 roleCall = main.TRUE
393
394 ipList = [ ]
395 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700396 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700397 try:
398 # Assign mastership to specific controllers. This assignment was
399 # determined for a 7 node cluser, but will work with any sized
400 # cluster
401 for i in range( 1, 29 ): # switches 1 through 28
402 # set up correct variables:
403 if i == 1:
404 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700405 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700406 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700407 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700408 c = 1 % main.numCtrls
409 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700410 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700411 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700412 c = 1 % main.numCtrls
413 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700414 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700415 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700416 c = 3 % main.numCtrls
417 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700418 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700419 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700420 c = 2 % main.numCtrls
421 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700422 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700423 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700424 c = 2 % main.numCtrls
425 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700426 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700427 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700428 c = 5 % main.numCtrls
429 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700430 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700431 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700432 c = 4 % main.numCtrls
433 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700434 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700435 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700436 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700437 c = 6 % main.numCtrls
438 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700439 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700440 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700441 elif i == 28:
442 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700443 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700444 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700445 else:
446 main.log.error( "You didn't write an else statement for " +
447 "switch s" + str( i ) )
448 roleCall = main.FALSE
449 # Assign switch
450 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
451 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700452 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700453 ipList.append( ip )
454 deviceList.append( deviceId )
455 except ( AttributeError, AssertionError ):
456 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700457 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700458 utilities.assert_equals(
459 expect=main.TRUE,
460 actual=roleCall,
461 onpass="Re-assigned switch mastership to designated controller",
462 onfail="Something wrong with deviceRole calls" )
463
464 main.step( "Check mastership was correctly assigned" )
465 roleCheck = main.TRUE
466 # NOTE: This is due to the fact that device mastership change is not
467 # atomic and is actually a multi step process
468 time.sleep( 5 )
469 for i in range( len( ipList ) ):
470 ip = ipList[i]
471 deviceId = deviceList[i]
472 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700473 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700474 if ip in master:
475 roleCheck = roleCheck and main.TRUE
476 else:
477 roleCheck = roleCheck and main.FALSE
478 main.log.error( "Error, controller " + ip + " is not" +
479 " master " + "of device " +
480 str( deviceId ) + ". Master is " +
481 repr( master ) + "." )
482 utilities.assert_equals(
483 expect=main.TRUE,
484 actual=roleCheck,
485 onpass="Switches were successfully reassigned to designated " +
486 "controller",
487 onfail="Switches were not successfully reassigned" )
488
489 def CASE3( self, main ):
490 """
491 Assign intents
492 """
493 import time
494 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700495 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700496 assert main, "main not defined"
497 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700498 assert main.CLIs, "main.CLIs not defined"
499 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700500 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700501 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700502 "assign predetermined host-to-host intents." +\
503 " After installation, check that the intent" +\
504 " is distributed to all nodes and the state" +\
505 " is INSTALLED"
506
507 # install onos-app-fwd
508 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700509 onosCli = main.CLIs[ main.activeNodes[0] ]
510 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700511 utilities.assert_equals( expect=main.TRUE, actual=installResults,
512 onpass="Install fwd successful",
513 onfail="Install fwd failed" )
514
515 main.step( "Check app ids" )
516 appCheck = main.TRUE
517 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700518 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700519 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700520 name="appToIDCheck-" + str( i ),
521 args=[] )
522 threads.append( t )
523 t.start()
524
525 for t in threads:
526 t.join()
527 appCheck = appCheck and t.result
528 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700529 main.log.warn( onosCli.apps() )
530 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700531 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
532 onpass="App Ids seem to be correct",
533 onfail="Something is wrong with app Ids" )
534
535 main.step( "Discovering Hosts( Via pingall for now )" )
536 # FIXME: Once we have a host discovery mechanism, use that instead
537 # REACTIVE FWD test
538 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700539 passMsg = "Reactive Pingall test passed"
540 time1 = time.time()
541 pingResult = main.Mininet1.pingall()
542 time2 = time.time()
543 if not pingResult:
544 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700545 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700546 passMsg += " on the second try"
547 utilities.assert_equals(
548 expect=main.TRUE,
549 actual=pingResult,
550 onpass= passMsg,
551 onfail="Reactive Pingall failed, " +
552 "one or more ping pairs failed" )
553 main.log.info( "Time for pingall: %2f seconds" %
554 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700555 # timeout for fwd flows
556 time.sleep( 11 )
557 # uninstall onos-app-fwd
558 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700559 node = main.activeNodes[0]
560 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700561 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
562 onpass="Uninstall fwd successful",
563 onfail="Uninstall fwd failed" )
564
565 main.step( "Check app ids" )
566 threads = []
567 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700568 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700569 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700570 name="appToIDCheck-" + str( i ),
571 args=[] )
572 threads.append( t )
573 t.start()
574
575 for t in threads:
576 t.join()
577 appCheck2 = appCheck2 and t.result
578 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700579 node = main.activeNodes[0]
580 main.log.warn( main.CLIs[node].apps() )
581 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700582 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
583 onpass="App Ids seem to be correct",
584 onfail="Something is wrong with app Ids" )
585
586 main.step( "Add host intents via cli" )
587 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700588 # TODO: move the host numbers to params
589 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700590 intentAddResult = True
591 hostResult = main.TRUE
592 for i in range( 8, 18 ):
593 main.log.info( "Adding host intent between h" + str( i ) +
594 " and h" + str( i + 10 ) )
595 host1 = "00:00:00:00:00:" + \
596 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
597 host2 = "00:00:00:00:00:" + \
598 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
599 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700600 host1Dict = onosCli.getHost( host1 )
601 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700602 host1Id = None
603 host2Id = None
604 if host1Dict and host2Dict:
605 host1Id = host1Dict.get( 'id', None )
606 host2Id = host2Dict.get( 'id', None )
607 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700608 nodeNum = ( i % len( main.activeNodes ) )
609 node = main.activeNodes[nodeNum]
610 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700611 if tmpId:
612 main.log.info( "Added intent with id: " + tmpId )
613 intentIds.append( tmpId )
614 else:
615 main.log.error( "addHostIntent returned: " +
616 repr( tmpId ) )
617 else:
618 main.log.error( "Error, getHost() failed for h" + str( i ) +
619 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700620 node = main.activeNodes[0]
621 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700622 main.log.warn( "Hosts output: " )
623 try:
624 main.log.warn( json.dumps( json.loads( hosts ),
625 sort_keys=True,
626 indent=4,
627 separators=( ',', ': ' ) ) )
628 except ( ValueError, TypeError ):
629 main.log.warn( repr( hosts ) )
630 hostResult = main.FALSE
631 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
632 onpass="Found a host id for each host",
633 onfail="Error looking up host ids" )
634
635 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700636 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700637 main.log.info( "Submitted intents: " + str( intentIds ) )
638 main.log.info( "Intents in ONOS: " + str( onosIds ) )
639 for intent in intentIds:
640 if intent in onosIds:
641 pass # intent submitted is in onos
642 else:
643 intentAddResult = False
644 if intentAddResult:
645 intentStop = time.time()
646 else:
647 intentStop = None
648 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700649 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700650 intentStates = []
651 installedCheck = True
652 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
653 count = 0
654 try:
655 for intent in json.loads( intents ):
656 state = intent.get( 'state', None )
657 if "INSTALLED" not in state:
658 installedCheck = False
659 intentId = intent.get( 'id', None )
660 intentStates.append( ( intentId, state ) )
661 except ( ValueError, TypeError ):
662 main.log.exception( "Error parsing intents" )
663 # add submitted intents not in the store
664 tmplist = [ i for i, s in intentStates ]
665 missingIntents = False
666 for i in intentIds:
667 if i not in tmplist:
668 intentStates.append( ( i, " - " ) )
669 missingIntents = True
670 intentStates.sort()
671 for i, s in intentStates:
672 count += 1
673 main.log.info( "%-6s%-15s%-15s" %
674 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700675 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700676 try:
677 missing = False
678 if leaders:
679 parsedLeaders = json.loads( leaders )
680 main.log.warn( json.dumps( parsedLeaders,
681 sort_keys=True,
682 indent=4,
683 separators=( ',', ': ' ) ) )
684 # check for all intent partitions
685 topics = []
686 for i in range( 14 ):
687 topics.append( "intent-partition-" + str( i ) )
688 main.log.debug( topics )
689 ONOStopics = [ j['topic'] for j in parsedLeaders ]
690 for topic in topics:
691 if topic not in ONOStopics:
692 main.log.error( "Error: " + topic +
693 " not in leaders" )
694 missing = True
695 else:
696 main.log.error( "leaders() returned None" )
697 except ( ValueError, TypeError ):
698 main.log.exception( "Error parsing leaders" )
699 main.log.error( repr( leaders ) )
700 # Check all nodes
701 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700702 for i in main.activeNodes:
703 response = main.CLIs[i].leaders( jsonFormat=False)
704 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700705 str( response ) )
706
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700707 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700708 try:
709 if partitions :
710 parsedPartitions = json.loads( partitions )
711 main.log.warn( json.dumps( parsedPartitions,
712 sort_keys=True,
713 indent=4,
714 separators=( ',', ': ' ) ) )
715 # TODO check for a leader in all paritions
716 # TODO check for consistency among nodes
717 else:
718 main.log.error( "partitions() returned None" )
719 except ( ValueError, TypeError ):
720 main.log.exception( "Error parsing partitions" )
721 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700722 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700723 try:
724 if pendingMap :
725 parsedPending = json.loads( pendingMap )
726 main.log.warn( json.dumps( parsedPending,
727 sort_keys=True,
728 indent=4,
729 separators=( ',', ': ' ) ) )
730 # TODO check something here?
731 else:
732 main.log.error( "pendingMap() returned None" )
733 except ( ValueError, TypeError ):
734 main.log.exception( "Error parsing pending map" )
735 main.log.error( repr( pendingMap ) )
736
737 intentAddResult = bool( intentAddResult and not missingIntents and
738 installedCheck )
739 if not intentAddResult:
740 main.log.error( "Error in pushing host intents to ONOS" )
741
742 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700743 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700744 correct = True
745 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700746 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700747 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700748 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700749 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700750 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700751 str( sorted( onosIds ) ) )
752 if sorted( ids ) != sorted( intentIds ):
753 main.log.warn( "Set of intent IDs doesn't match" )
754 correct = False
755 break
756 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700757 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700758 for intent in intents:
759 if intent[ 'state' ] != "INSTALLED":
760 main.log.warn( "Intent " + intent[ 'id' ] +
761 " is " + intent[ 'state' ] )
762 correct = False
763 break
764 if correct:
765 break
766 else:
767 time.sleep(1)
768 if not intentStop:
769 intentStop = time.time()
770 global gossipTime
771 gossipTime = intentStop - intentStart
772 main.log.info( "It took about " + str( gossipTime ) +
773 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700774 gossipPeriod = int( main.params['timers']['gossip'] )
775 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700776 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700777 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700778 onpass="ECM anti-entropy for intents worked within " +
779 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700780 onfail="Intent ECM anti-entropy took too long. " +
781 "Expected time:{}, Actual time:{}".format( maxGossipTime,
782 gossipTime ) )
783 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700784 intentAddResult = True
785
786 if not intentAddResult or "key" in pendingMap:
787 import time
788 installedCheck = True
789 main.log.info( "Sleeping 60 seconds to see if intents are found" )
790 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700791 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700792 main.log.info( "Submitted intents: " + str( intentIds ) )
793 main.log.info( "Intents in ONOS: " + str( onosIds ) )
794 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700795 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700796 intentStates = []
797 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
798 count = 0
799 try:
800 for intent in json.loads( intents ):
801 # Iter through intents of a node
802 state = intent.get( 'state', None )
803 if "INSTALLED" not in state:
804 installedCheck = False
805 intentId = intent.get( 'id', None )
806 intentStates.append( ( intentId, state ) )
807 except ( ValueError, TypeError ):
808 main.log.exception( "Error parsing intents" )
809 # add submitted intents not in the store
810 tmplist = [ i for i, s in intentStates ]
811 for i in intentIds:
812 if i not in tmplist:
813 intentStates.append( ( i, " - " ) )
814 intentStates.sort()
815 for i, s in intentStates:
816 count += 1
817 main.log.info( "%-6s%-15s%-15s" %
818 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700819 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700820 try:
821 missing = False
822 if leaders:
823 parsedLeaders = json.loads( leaders )
824 main.log.warn( json.dumps( parsedLeaders,
825 sort_keys=True,
826 indent=4,
827 separators=( ',', ': ' ) ) )
828 # check for all intent partitions
829 # check for election
830 topics = []
831 for i in range( 14 ):
832 topics.append( "intent-partition-" + str( i ) )
833 # FIXME: this should only be after we start the app
834 topics.append( "org.onosproject.election" )
835 main.log.debug( topics )
836 ONOStopics = [ j['topic'] for j in parsedLeaders ]
837 for topic in topics:
838 if topic not in ONOStopics:
839 main.log.error( "Error: " + topic +
840 " not in leaders" )
841 missing = True
842 else:
843 main.log.error( "leaders() returned None" )
844 except ( ValueError, TypeError ):
845 main.log.exception( "Error parsing leaders" )
846 main.log.error( repr( leaders ) )
847 # Check all nodes
848 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700849 for i in main.activeNodes:
850 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700851 response = node.leaders( jsonFormat=False)
852 main.log.warn( str( node.name ) + " leaders output: \n" +
853 str( response ) )
854
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700855 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700856 try:
857 if partitions :
858 parsedPartitions = json.loads( partitions )
859 main.log.warn( json.dumps( parsedPartitions,
860 sort_keys=True,
861 indent=4,
862 separators=( ',', ': ' ) ) )
863 # TODO check for a leader in all paritions
864 # TODO check for consistency among nodes
865 else:
866 main.log.error( "partitions() returned None" )
867 except ( ValueError, TypeError ):
868 main.log.exception( "Error parsing partitions" )
869 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700870 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700871 try:
872 if pendingMap :
873 parsedPending = json.loads( pendingMap )
874 main.log.warn( json.dumps( parsedPending,
875 sort_keys=True,
876 indent=4,
877 separators=( ',', ': ' ) ) )
878 # TODO check something here?
879 else:
880 main.log.error( "pendingMap() returned None" )
881 except ( ValueError, TypeError ):
882 main.log.exception( "Error parsing pending map" )
883 main.log.error( repr( pendingMap ) )
884
    def CASE4( self, main ):
        """
        Ping across added host intents

        Verifies dataplane connectivity for the host intents added in an
        earlier case by pinging each Mininet host pair h<i> <-> h<i+10>
        for i in 8..17.  Then polls the intent store until every intent is
        INSTALLED (up to ~40 seconds) and checks that all expected
        intent-partition topics have leaders.  On failure it dumps intent,
        leader, partition, and pending-map state from ONOS for debugging,
        waits 60 seconds, and retries the pings once.

        Results are reported through utilities.assert_equals; the method
        returns nothing.
        """
        import json
        import time
        # Sanity-check that the framework state from CASE1 exists before using it
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        # Single-node queries below go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # Host pairs h8-h18 .. h17-h27 correspond to the added host intents
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intent store from one node to help diagnose the failure
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                # intents() output was not valid JSON; log it raw
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll (1 s between tries, max 40 tries) until every intent reported
        # by the node is in the INSTALLED state
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # Expect one leadership topic per intent partition (14 total)
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders output only when
        # topicCheck PASSED; every parallel diagnostic block in this file
        # dumps only on failure ( "if missing:" ).  The condition looks
        # inverted -- confirm intent before changing.
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Retry path: if intents never all reached INSTALLED, give the
        # cluster another 60 s, re-dump diagnostics, and ping once more
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    # NOTE: unlike the earlier topic check, this one also
                    # expects the leadership-election topic
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # On missing topics, dump leaders output from every active node
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            # NOTE: here 'node' is an index into main.CLIs, whereas in the
            # dump loops above 'node' held a CLI handle
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1165
1166 def CASE5( self, main ):
1167 """
1168 Reading state of ONOS
1169 """
1170 import json
1171 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001172 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001173 assert main, "main not defined"
1174 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001175 assert main.CLIs, "main.CLIs not defined"
1176 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001177
1178 main.case( "Setting up and gathering data for current state" )
1179 # The general idea for this test case is to pull the state of
1180 # ( intents,flows, topology,... ) from each ONOS node
1181 # We can then compare them with each other and also with past states
1182
1183 main.step( "Check that each switch has a master" )
1184 global mastershipState
1185 mastershipState = '[]'
1186
1187 # Assert that each device has a master
1188 rolesNotNull = main.TRUE
1189 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001190 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001191 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001192 name="rolesNotNull-" + str( i ),
1193 args=[] )
1194 threads.append( t )
1195 t.start()
1196
1197 for t in threads:
1198 t.join()
1199 rolesNotNull = rolesNotNull and t.result
1200 utilities.assert_equals(
1201 expect=main.TRUE,
1202 actual=rolesNotNull,
1203 onpass="Each device has a master",
1204 onfail="Some devices don't have a master assigned" )
1205
1206 main.step( "Get the Mastership of each switch from each controller" )
1207 ONOSMastership = []
1208 mastershipCheck = main.FALSE
1209 consistentMastership = True
1210 rolesResults = True
1211 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001212 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001213 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001214 name="roles-" + str( i ),
1215 args=[] )
1216 threads.append( t )
1217 t.start()
1218
1219 for t in threads:
1220 t.join()
1221 ONOSMastership.append( t.result )
1222
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001223 for i in range( len( ONOSMastership ) ):
1224 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001225 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001226 main.log.error( "Error in getting ONOS" + node + " roles" )
1227 main.log.warn( "ONOS" + node + " mastership response: " +
1228 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001229 rolesResults = False
1230 utilities.assert_equals(
1231 expect=True,
1232 actual=rolesResults,
1233 onpass="No error in reading roles output",
1234 onfail="Error in reading roles from ONOS" )
1235
1236 main.step( "Check for consistency in roles from each controller" )
1237 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1238 main.log.info(
1239 "Switch roles are consistent across all ONOS nodes" )
1240 else:
1241 consistentMastership = False
1242 utilities.assert_equals(
1243 expect=True,
1244 actual=consistentMastership,
1245 onpass="Switch roles are consistent across all ONOS nodes",
1246 onfail="ONOS nodes have different views of switch roles" )
1247
1248 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001249 for i in range( len( main.activeNodes ) ):
1250 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001251 try:
1252 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001253 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001254 json.dumps(
1255 json.loads( ONOSMastership[ i ] ),
1256 sort_keys=True,
1257 indent=4,
1258 separators=( ',', ': ' ) ) )
1259 except ( ValueError, TypeError ):
1260 main.log.warn( repr( ONOSMastership[ i ] ) )
1261 elif rolesResults and consistentMastership:
1262 mastershipCheck = main.TRUE
1263 mastershipState = ONOSMastership[ 0 ]
1264
1265 main.step( "Get the intents from each controller" )
1266 global intentState
1267 intentState = []
1268 ONOSIntents = []
1269 intentCheck = main.FALSE
1270 consistentIntents = True
1271 intentsResults = True
1272 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001273 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001274 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001275 name="intents-" + str( i ),
1276 args=[],
1277 kwargs={ 'jsonFormat': True } )
1278 threads.append( t )
1279 t.start()
1280
1281 for t in threads:
1282 t.join()
1283 ONOSIntents.append( t.result )
1284
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001285 for i in range( len( ONOSIntents ) ):
1286 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001287 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001288 main.log.error( "Error in getting ONOS" + node + " intents" )
1289 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001290 repr( ONOSIntents[ i ] ) )
1291 intentsResults = False
1292 utilities.assert_equals(
1293 expect=True,
1294 actual=intentsResults,
1295 onpass="No error in reading intents output",
1296 onfail="Error in reading intents from ONOS" )
1297
1298 main.step( "Check for consistency in Intents from each controller" )
1299 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1300 main.log.info( "Intents are consistent across all ONOS " +
1301 "nodes" )
1302 else:
1303 consistentIntents = False
1304 main.log.error( "Intents not consistent" )
1305 utilities.assert_equals(
1306 expect=True,
1307 actual=consistentIntents,
1308 onpass="Intents are consistent across all ONOS nodes",
1309 onfail="ONOS nodes have different views of intents" )
1310
1311 if intentsResults:
1312 # Try to make it easy to figure out what is happening
1313 #
1314 # Intent ONOS1 ONOS2 ...
1315 # 0x01 INSTALLED INSTALLING
1316 # ... ... ...
1317 # ... ... ...
1318 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001319 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001320 title += " " * 10 + "ONOS" + str( n + 1 )
1321 main.log.warn( title )
1322 # get all intent keys in the cluster
1323 keys = []
1324 for nodeStr in ONOSIntents:
1325 node = json.loads( nodeStr )
1326 for intent in node:
1327 keys.append( intent.get( 'id' ) )
1328 keys = set( keys )
1329 for key in keys:
1330 row = "%-13s" % key
1331 for nodeStr in ONOSIntents:
1332 node = json.loads( nodeStr )
1333 for intent in node:
1334 if intent.get( 'id', "Error" ) == key:
1335 row += "%-15s" % intent.get( 'state' )
1336 main.log.warn( row )
1337 # End table view
1338
1339 if intentsResults and not consistentIntents:
1340 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001341 n = str( main.activeNodes[-1] + 1 )
1342 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001343 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1344 sort_keys=True,
1345 indent=4,
1346 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001347 for i in range( len( ONOSIntents ) ):
1348 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001349 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001350 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001351 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1352 sort_keys=True,
1353 indent=4,
1354 separators=( ',', ': ' ) ) )
1355 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001356 main.log.debug( "ONOS" + node + " intents match ONOS" +
1357 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001358 elif intentsResults and consistentIntents:
1359 intentCheck = main.TRUE
1360 intentState = ONOSIntents[ 0 ]
1361
1362 main.step( "Get the flows from each controller" )
1363 global flowState
1364 flowState = []
1365 ONOSFlows = []
1366 ONOSFlowsJson = []
1367 flowCheck = main.FALSE
1368 consistentFlows = True
1369 flowsResults = True
1370 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001371 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001372 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001373 name="flows-" + str( i ),
1374 args=[],
1375 kwargs={ 'jsonFormat': True } )
1376 threads.append( t )
1377 t.start()
1378
1379 # NOTE: Flows command can take some time to run
1380 time.sleep(30)
1381 for t in threads:
1382 t.join()
1383 result = t.result
1384 ONOSFlows.append( result )
1385
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001386 for i in range( len( ONOSFlows ) ):
1387 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001388 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1389 main.log.error( "Error in getting ONOS" + num + " flows" )
1390 main.log.warn( "ONOS" + num + " flows response: " +
1391 repr( ONOSFlows[ i ] ) )
1392 flowsResults = False
1393 ONOSFlowsJson.append( None )
1394 else:
1395 try:
1396 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1397 except ( ValueError, TypeError ):
1398 # FIXME: change this to log.error?
1399 main.log.exception( "Error in parsing ONOS" + num +
1400 " response as json." )
1401 main.log.error( repr( ONOSFlows[ i ] ) )
1402 ONOSFlowsJson.append( None )
1403 flowsResults = False
1404 utilities.assert_equals(
1405 expect=True,
1406 actual=flowsResults,
1407 onpass="No error in reading flows output",
1408 onfail="Error in reading flows from ONOS" )
1409
1410 main.step( "Check for consistency in Flows from each controller" )
1411 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1412 if all( tmp ):
1413 main.log.info( "Flow count is consistent across all ONOS nodes" )
1414 else:
1415 consistentFlows = False
1416 utilities.assert_equals(
1417 expect=True,
1418 actual=consistentFlows,
1419 onpass="The flow count is consistent across all ONOS nodes",
1420 onfail="ONOS nodes have different flow counts" )
1421
1422 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001423 for i in range( len( ONOSFlows ) ):
1424 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001425 try:
1426 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001427 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001428 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1429 indent=4, separators=( ',', ': ' ) ) )
1430 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001431 main.log.warn( "ONOS" + node + " flows: " +
1432 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001433 elif flowsResults and consistentFlows:
1434 flowCheck = main.TRUE
1435 flowState = ONOSFlows[ 0 ]
1436
1437 main.step( "Get the OF Table entries" )
1438 global flows
1439 flows = []
1440 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001441 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001442 if flowCheck == main.FALSE:
1443 for table in flows:
1444 main.log.warn( table )
1445 # TODO: Compare switch flow tables with ONOS flow tables
1446
1447 main.step( "Start continuous pings" )
1448 main.Mininet2.pingLong(
1449 src=main.params[ 'PING' ][ 'source1' ],
1450 target=main.params[ 'PING' ][ 'target1' ],
1451 pingTime=500 )
1452 main.Mininet2.pingLong(
1453 src=main.params[ 'PING' ][ 'source2' ],
1454 target=main.params[ 'PING' ][ 'target2' ],
1455 pingTime=500 )
1456 main.Mininet2.pingLong(
1457 src=main.params[ 'PING' ][ 'source3' ],
1458 target=main.params[ 'PING' ][ 'target3' ],
1459 pingTime=500 )
1460 main.Mininet2.pingLong(
1461 src=main.params[ 'PING' ][ 'source4' ],
1462 target=main.params[ 'PING' ][ 'target4' ],
1463 pingTime=500 )
1464 main.Mininet2.pingLong(
1465 src=main.params[ 'PING' ][ 'source5' ],
1466 target=main.params[ 'PING' ][ 'target5' ],
1467 pingTime=500 )
1468 main.Mininet2.pingLong(
1469 src=main.params[ 'PING' ][ 'source6' ],
1470 target=main.params[ 'PING' ][ 'target6' ],
1471 pingTime=500 )
1472 main.Mininet2.pingLong(
1473 src=main.params[ 'PING' ][ 'source7' ],
1474 target=main.params[ 'PING' ][ 'target7' ],
1475 pingTime=500 )
1476 main.Mininet2.pingLong(
1477 src=main.params[ 'PING' ][ 'source8' ],
1478 target=main.params[ 'PING' ][ 'target8' ],
1479 pingTime=500 )
1480 main.Mininet2.pingLong(
1481 src=main.params[ 'PING' ][ 'source9' ],
1482 target=main.params[ 'PING' ][ 'target9' ],
1483 pingTime=500 )
1484 main.Mininet2.pingLong(
1485 src=main.params[ 'PING' ][ 'source10' ],
1486 target=main.params[ 'PING' ][ 'target10' ],
1487 pingTime=500 )
1488
1489 main.step( "Collecting topology information from ONOS" )
1490 devices = []
1491 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001492 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001493 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001494 name="devices-" + str( i ),
1495 args=[ ] )
1496 threads.append( t )
1497 t.start()
1498
1499 for t in threads:
1500 t.join()
1501 devices.append( t.result )
1502 hosts = []
1503 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001504 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001505 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001506 name="hosts-" + str( i ),
1507 args=[ ] )
1508 threads.append( t )
1509 t.start()
1510
1511 for t in threads:
1512 t.join()
1513 try:
1514 hosts.append( json.loads( t.result ) )
1515 except ( ValueError, TypeError ):
1516 # FIXME: better handling of this, print which node
1517 # Maybe use thread name?
1518 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001519 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001520 hosts.append( None )
1521
1522 ports = []
1523 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001524 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001525 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001526 name="ports-" + str( i ),
1527 args=[ ] )
1528 threads.append( t )
1529 t.start()
1530
1531 for t in threads:
1532 t.join()
1533 ports.append( t.result )
1534 links = []
1535 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001536 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001537 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001538 name="links-" + str( i ),
1539 args=[ ] )
1540 threads.append( t )
1541 t.start()
1542
1543 for t in threads:
1544 t.join()
1545 links.append( t.result )
1546 clusters = []
1547 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001548 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001549 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001550 name="clusters-" + str( i ),
1551 args=[ ] )
1552 threads.append( t )
1553 t.start()
1554
1555 for t in threads:
1556 t.join()
1557 clusters.append( t.result )
1558 # Compare json objects for hosts and dataplane clusters
1559
1560 # hosts
1561 main.step( "Host view is consistent across ONOS nodes" )
1562 consistentHostsResult = main.TRUE
1563 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001564 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001565 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001566 if hosts[ controller ] == hosts[ 0 ]:
1567 continue
1568 else: # hosts not consistent
1569 main.log.error( "hosts from ONOS" +
1570 controllerStr +
1571 " is inconsistent with ONOS1" )
1572 main.log.warn( repr( hosts[ controller ] ) )
1573 consistentHostsResult = main.FALSE
1574
1575 else:
1576 main.log.error( "Error in getting ONOS hosts from ONOS" +
1577 controllerStr )
1578 consistentHostsResult = main.FALSE
1579 main.log.warn( "ONOS" + controllerStr +
1580 " hosts response: " +
1581 repr( hosts[ controller ] ) )
1582 utilities.assert_equals(
1583 expect=main.TRUE,
1584 actual=consistentHostsResult,
1585 onpass="Hosts view is consistent across all ONOS nodes",
1586 onfail="ONOS nodes have different views of hosts" )
1587
1588 main.step( "Each host has an IP address" )
1589 ipResult = main.TRUE
1590 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001591 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001592 if hosts[ controller ]:
1593 for host in hosts[ controller ]:
1594 if not host.get( 'ipAddresses', [ ] ):
1595 main.log.error( "Error with host ips on controller" +
1596 controllerStr + ": " + str( host ) )
1597 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001598 utilities.assert_equals(
1599 expect=main.TRUE,
1600 actual=ipResult,
1601 onpass="The ips of the hosts aren't empty",
1602 onfail="The ip of at least one host is missing" )
1603
1604 # Strongly connected clusters of devices
1605 main.step( "Cluster view is consistent across ONOS nodes" )
1606 consistentClustersResult = main.TRUE
1607 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001608 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001609 if "Error" not in clusters[ controller ]:
1610 if clusters[ controller ] == clusters[ 0 ]:
1611 continue
1612 else: # clusters not consistent
1613 main.log.error( "clusters from ONOS" + controllerStr +
1614 " is inconsistent with ONOS1" )
1615 consistentClustersResult = main.FALSE
1616
1617 else:
1618 main.log.error( "Error in getting dataplane clusters " +
1619 "from ONOS" + controllerStr )
1620 consistentClustersResult = main.FALSE
1621 main.log.warn( "ONOS" + controllerStr +
1622 " clusters response: " +
1623 repr( clusters[ controller ] ) )
1624 utilities.assert_equals(
1625 expect=main.TRUE,
1626 actual=consistentClustersResult,
1627 onpass="Clusters view is consistent across all ONOS nodes",
1628 onfail="ONOS nodes have different views of clusters" )
1629 # there should always only be one cluster
1630 main.step( "Cluster view correct across ONOS nodes" )
1631 try:
1632 numClusters = len( json.loads( clusters[ 0 ] ) )
1633 except ( ValueError, TypeError ):
1634 main.log.exception( "Error parsing clusters[0]: " +
1635 repr( clusters[ 0 ] ) )
1636 clusterResults = main.FALSE
1637 if numClusters == 1:
1638 clusterResults = main.TRUE
1639 utilities.assert_equals(
1640 expect=1,
1641 actual=numClusters,
1642 onpass="ONOS shows 1 SCC",
1643 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1644
1645 main.step( "Comparing ONOS topology to MN" )
1646 devicesResults = main.TRUE
1647 linksResults = main.TRUE
1648 hostsResults = main.TRUE
1649 mnSwitches = main.Mininet1.getSwitches()
1650 mnLinks = main.Mininet1.getLinks()
1651 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001652 for controller in main.activeNodes:
1653 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001654 if devices[ controller ] and ports[ controller ] and\
1655 "Error" not in devices[ controller ] and\
1656 "Error" not in ports[ controller ]:
1657
1658 currentDevicesResult = main.Mininet1.compareSwitches(
1659 mnSwitches,
1660 json.loads( devices[ controller ] ),
1661 json.loads( ports[ controller ] ) )
1662 else:
1663 currentDevicesResult = main.FALSE
1664 utilities.assert_equals( expect=main.TRUE,
1665 actual=currentDevicesResult,
1666 onpass="ONOS" + controllerStr +
1667 " Switches view is correct",
1668 onfail="ONOS" + controllerStr +
1669 " Switches view is incorrect" )
1670 if links[ controller ] and "Error" not in links[ controller ]:
1671 currentLinksResult = main.Mininet1.compareLinks(
1672 mnSwitches, mnLinks,
1673 json.loads( links[ controller ] ) )
1674 else:
1675 currentLinksResult = main.FALSE
1676 utilities.assert_equals( expect=main.TRUE,
1677 actual=currentLinksResult,
1678 onpass="ONOS" + controllerStr +
1679 " links view is correct",
1680 onfail="ONOS" + controllerStr +
1681 " links view is incorrect" )
1682
Jon Hall657cdf62015-12-17 14:40:51 -08001683 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001684 currentHostsResult = main.Mininet1.compareHosts(
1685 mnHosts,
1686 hosts[ controller ] )
1687 else:
1688 currentHostsResult = main.FALSE
1689 utilities.assert_equals( expect=main.TRUE,
1690 actual=currentHostsResult,
1691 onpass="ONOS" + controllerStr +
1692 " hosts exist in Mininet",
1693 onfail="ONOS" + controllerStr +
1694 " hosts don't match Mininet" )
1695
1696 devicesResults = devicesResults and currentDevicesResult
1697 linksResults = linksResults and currentLinksResult
1698 hostsResults = hostsResults and currentHostsResult
1699
1700 main.step( "Device information is correct" )
1701 utilities.assert_equals(
1702 expect=main.TRUE,
1703 actual=devicesResults,
1704 onpass="Device information is correct",
1705 onfail="Device information is incorrect" )
1706
1707 main.step( "Links are correct" )
1708 utilities.assert_equals(
1709 expect=main.TRUE,
1710 actual=linksResults,
1711 onpass="Link are correct",
1712 onfail="Links are incorrect" )
1713
1714 main.step( "Hosts are correct" )
1715 utilities.assert_equals(
1716 expect=main.TRUE,
1717 actual=hostsResults,
1718 onpass="Hosts are correct",
1719 onfail="Hosts are incorrect" )
1720
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001721 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001722 """
1723 The Failure case.
1724 """
Jon Halle1a3b752015-07-22 13:02:46 -07001725 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001726 assert main, "main not defined"
1727 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001728 assert main.CLIs, "main.CLIs not defined"
1729 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001730 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001731
1732 main.step( "Checking ONOS Logs for errors" )
1733 for node in main.nodes:
1734 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1735 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1736
Jon Hall3b489db2015-10-05 14:38:37 -07001737 n = len( main.nodes ) # Number of nodes
1738 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1739 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1740 if n > 3:
1741 main.kill.append( p - 1 )
1742 # NOTE: This only works for cluster sizes of 3,5, or 7.
1743
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001744 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001745 killResults = main.TRUE
1746 for i in main.kill:
1747 killResults = killResults and\
1748 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001749 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001750 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001751 onpass="ONOS nodes killed successfully",
1752 onfail="ONOS nodes NOT successfully killed" )
1753
1754 def CASE62( self, main ):
1755 """
1756 The bring up stopped nodes
1757 """
1758 import time
1759 assert main.numCtrls, "main.numCtrls not defined"
1760 assert main, "main not defined"
1761 assert utilities.assert_equals, "utilities.assert_equals not defined"
1762 assert main.CLIs, "main.CLIs not defined"
1763 assert main.nodes, "main.nodes not defined"
1764 assert main.kill, "main.kill not defined"
1765 main.case( "Restart minority of ONOS nodes" )
1766
1767 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1768 startResults = main.TRUE
1769 restartTime = time.time()
1770 for i in main.kill:
1771 startResults = startResults and\
1772 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1773 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1774 onpass="ONOS nodes started successfully",
1775 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001776
1777 main.step( "Checking if ONOS is up yet" )
1778 count = 0
1779 onosIsupResult = main.FALSE
1780 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001781 onosIsupResult = main.TRUE
1782 for i in main.kill:
1783 onosIsupResult = onosIsupResult and\
1784 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001785 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001786 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1787 onpass="ONOS restarted successfully",
1788 onfail="ONOS restart NOT successful" )
1789
Jon Halle1a3b752015-07-22 13:02:46 -07001790 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001791 cliResults = main.TRUE
1792 for i in main.kill:
1793 cliResults = cliResults and\
1794 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001795 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001796 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1797 onpass="ONOS cli restarted",
1798 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001799 main.activeNodes.sort()
1800 try:
1801 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1802 "List of active nodes has duplicates, this likely indicates something was run out of order"
1803 except AssertionError:
1804 main.log.exception( "" )
1805 main.cleanup()
1806 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001807
1808 # Grab the time of restart so we chan check how long the gossip
1809 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001810 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001811 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001812 # TODO: MAke this configurable. Also, we are breaking the above timer
1813 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001814 node = main.activeNodes[0]
1815 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1816 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1817 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001818
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Verifies that after the node kill the surviving cluster still:
        - assigns a master to every device,
        - agrees on device roles and intents across all active nodes,
        - holds the same intents and switch flow tables as before the
          failure (state recorded by CASE5), and
        - has a functional leadership election whose leader is not one of
          the killed nodes.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.kill is normally set by CASE61; default to an empty list so
        # this case can also run when no nodes were killed
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel, one thread per node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            # 1-based node number for log messages
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's raw roles output must match the first node's exactly
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Dump each node's view when they disagree, to aid debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                # NOTE(review): two positional arguments are passed to
                # log.warn here -- verify the TestON logger accepts this
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents output is empty or contains "Error"
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Sorted comparison: intent ordering may differ between nodes
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

        # Try to make it easy to figure out what is happening
        #
        # Intent      ONOS1      ONOS2    ...
        # 0x01     INSTALLED  INSTALLING
        # ...         ...        ...
        # ...         ...        ...
        title = "   ID"
        for n in main.activeNodes:
            title += " " * 10 + "ONOS" + str( n + 1 )
        main.log.warn( title )
        # get all intent keys in the cluster
        keys = []
        for nodeStr in ONOSIntents:
            node = json.loads( nodeStr )
            for intent in node:
                keys.append( intent.get( 'id' ) )
        keys = set( keys )
        # One row per intent id, one state column per node
        for key in keys:
            row = "%-13s" % key
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    if intent.get( 'id' ) == key:
                        row += "%-15s" % intent.get( 'state' )
            main.log.warn( row )
        # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # NOTE(review): 'intentState' is presumably a module-level global
        # set in CASE5 -- a NameError here means CASE5 did not run; confirm
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not byte-identical: compare parsed objects
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump before/after intent sets for debugging
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # Check all 28 switches of the test topology
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE(review): 'flows' is presumably the flow-table snapshot
            # captured before the failure (CASE5) -- confirm
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IP addresses of the nodes that were killed; the current leader
        # must not be one of them
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # All active nodes must agree on one and the same leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
        # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2135
2136 def CASE8( self, main ):
2137 """
2138 Compare topo
2139 """
2140 import json
2141 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002142 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002143 assert main, "main not defined"
2144 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002145 assert main.CLIs, "main.CLIs not defined"
2146 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002147
2148 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002149 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002150 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002151 topoResult = main.FALSE
2152 elapsed = 0
2153 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002154 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002155 startTime = time.time()
2156 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002157 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002158 devicesResults = main.TRUE
2159 linksResults = main.TRUE
2160 hostsResults = main.TRUE
2161 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002162 count += 1
2163 cliStart = time.time()
2164 devices = []
2165 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002166 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002167 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002168 name="devices-" + str( i ),
2169 args=[ ] )
2170 threads.append( t )
2171 t.start()
2172
2173 for t in threads:
2174 t.join()
2175 devices.append( t.result )
2176 hosts = []
2177 ipResult = main.TRUE
2178 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002179 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002180 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002181 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002182 args=[ main.CLIs[i].hosts, [ None ] ],
2183 kwargs= { 'sleep': 5, 'attempts': 5,
2184 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002185 threads.append( t )
2186 t.start()
2187
2188 for t in threads:
2189 t.join()
2190 try:
2191 hosts.append( json.loads( t.result ) )
2192 except ( ValueError, TypeError ):
2193 main.log.exception( "Error parsing hosts results" )
2194 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002195 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002196 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002197 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002198 if hosts[ controller ]:
2199 for host in hosts[ controller ]:
2200 if host is None or host.get( 'ipAddresses', [] ) == []:
2201 main.log.error(
2202 "Error with host ipAddresses on controller" +
2203 controllerStr + ": " + str( host ) )
2204 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002205 ports = []
2206 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002207 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002208 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002209 name="ports-" + str( i ),
2210 args=[ ] )
2211 threads.append( t )
2212 t.start()
2213
2214 for t in threads:
2215 t.join()
2216 ports.append( t.result )
2217 links = []
2218 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002219 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002220 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002221 name="links-" + str( i ),
2222 args=[ ] )
2223 threads.append( t )
2224 t.start()
2225
2226 for t in threads:
2227 t.join()
2228 links.append( t.result )
2229 clusters = []
2230 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002231 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002232 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002233 name="clusters-" + str( i ),
2234 args=[ ] )
2235 threads.append( t )
2236 t.start()
2237
2238 for t in threads:
2239 t.join()
2240 clusters.append( t.result )
2241
2242 elapsed = time.time() - startTime
2243 cliTime = time.time() - cliStart
2244 print "Elapsed time: " + str( elapsed )
2245 print "CLI time: " + str( cliTime )
2246
2247 mnSwitches = main.Mininet1.getSwitches()
2248 mnLinks = main.Mininet1.getLinks()
2249 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002250 for controller in range( len( main.activeNodes ) ):
2251 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002252 if devices[ controller ] and ports[ controller ] and\
2253 "Error" not in devices[ controller ] and\
2254 "Error" not in ports[ controller ]:
2255
Jon Hallc6793552016-01-19 14:18:37 -08002256 try:
2257 currentDevicesResult = main.Mininet1.compareSwitches(
2258 mnSwitches,
2259 json.loads( devices[ controller ] ),
2260 json.loads( ports[ controller ] ) )
2261 except ( TypeError, ValueError ) as e:
2262 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2263 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002264 else:
2265 currentDevicesResult = main.FALSE
2266 utilities.assert_equals( expect=main.TRUE,
2267 actual=currentDevicesResult,
2268 onpass="ONOS" + controllerStr +
2269 " Switches view is correct",
2270 onfail="ONOS" + controllerStr +
2271 " Switches view is incorrect" )
2272
2273 if links[ controller ] and "Error" not in links[ controller ]:
2274 currentLinksResult = main.Mininet1.compareLinks(
2275 mnSwitches, mnLinks,
2276 json.loads( links[ controller ] ) )
2277 else:
2278 currentLinksResult = main.FALSE
2279 utilities.assert_equals( expect=main.TRUE,
2280 actual=currentLinksResult,
2281 onpass="ONOS" + controllerStr +
2282 " links view is correct",
2283 onfail="ONOS" + controllerStr +
2284 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002285 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002286 currentHostsResult = main.Mininet1.compareHosts(
2287 mnHosts,
2288 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002289 elif hosts[ controller ] == []:
2290 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002291 else:
2292 currentHostsResult = main.FALSE
2293 utilities.assert_equals( expect=main.TRUE,
2294 actual=currentHostsResult,
2295 onpass="ONOS" + controllerStr +
2296 " hosts exist in Mininet",
2297 onfail="ONOS" + controllerStr +
2298 " hosts don't match Mininet" )
2299 # CHECKING HOST ATTACHMENT POINTS
2300 hostAttachment = True
2301 zeroHosts = False
2302 # FIXME: topo-HA/obelisk specific mappings:
2303 # key is mac and value is dpid
2304 mappings = {}
2305 for i in range( 1, 29 ): # hosts 1 through 28
2306 # set up correct variables:
2307 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2308 if i == 1:
2309 deviceId = "1000".zfill(16)
2310 elif i == 2:
2311 deviceId = "2000".zfill(16)
2312 elif i == 3:
2313 deviceId = "3000".zfill(16)
2314 elif i == 4:
2315 deviceId = "3004".zfill(16)
2316 elif i == 5:
2317 deviceId = "5000".zfill(16)
2318 elif i == 6:
2319 deviceId = "6000".zfill(16)
2320 elif i == 7:
2321 deviceId = "6007".zfill(16)
2322 elif i >= 8 and i <= 17:
2323 dpid = '3' + str( i ).zfill( 3 )
2324 deviceId = dpid.zfill(16)
2325 elif i >= 18 and i <= 27:
2326 dpid = '6' + str( i ).zfill( 3 )
2327 deviceId = dpid.zfill(16)
2328 elif i == 28:
2329 deviceId = "2800".zfill(16)
2330 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002331 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002332 if hosts[ controller ] == []:
2333 main.log.warn( "There are no hosts discovered" )
2334 zeroHosts = True
2335 else:
2336 for host in hosts[ controller ]:
2337 mac = None
2338 location = None
2339 device = None
2340 port = None
2341 try:
2342 mac = host.get( 'mac' )
2343 assert mac, "mac field could not be found for this host object"
2344
2345 location = host.get( 'location' )
2346 assert location, "location field could not be found for this host object"
2347
2348 # Trim the protocol identifier off deviceId
2349 device = str( location.get( 'elementId' ) ).split(':')[1]
2350 assert device, "elementId field could not be found for this host location object"
2351
2352 port = location.get( 'port' )
2353 assert port, "port field could not be found for this host location object"
2354
2355 # Now check if this matches where they should be
2356 if mac and device and port:
2357 if str( port ) != "1":
2358 main.log.error( "The attachment port is incorrect for " +
2359 "host " + str( mac ) +
2360 ". Expected: 1 Actual: " + str( port) )
2361 hostAttachment = False
2362 if device != mappings[ str( mac ) ]:
2363 main.log.error( "The attachment device is incorrect for " +
2364 "host " + str( mac ) +
2365 ". Expected: " + mappings[ str( mac ) ] +
2366 " Actual: " + device )
2367 hostAttachment = False
2368 else:
2369 hostAttachment = False
2370 except AssertionError:
2371 main.log.exception( "Json object not as expected" )
2372 main.log.error( repr( host ) )
2373 hostAttachment = False
2374 else:
2375 main.log.error( "No hosts json output or \"Error\"" +
2376 " in output. hosts = " +
2377 repr( hosts[ controller ] ) )
2378 if zeroHosts is False:
2379 hostAttachment = True
2380
2381 # END CHECKING HOST ATTACHMENT POINTS
2382 devicesResults = devicesResults and currentDevicesResult
2383 linksResults = linksResults and currentLinksResult
2384 hostsResults = hostsResults and currentHostsResult
2385 hostAttachmentResults = hostAttachmentResults and\
2386 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002387 topoResult = devicesResults and linksResults and\
2388 hostsResults and hostAttachmentResults
2389 utilities.assert_equals( expect=True,
2390 actual=topoResult,
2391 onpass="ONOS topology matches Mininet",
2392 onfail="ONOS topology don't match Mininet" )
2393 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002394
2395 # Compare json objects for hosts and dataplane clusters
2396
2397 # hosts
2398 main.step( "Hosts view is consistent across all ONOS nodes" )
2399 consistentHostsResult = main.TRUE
2400 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002401 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002402 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002403 if hosts[ controller ] == hosts[ 0 ]:
2404 continue
2405 else: # hosts not consistent
2406 main.log.error( "hosts from ONOS" + controllerStr +
2407 " is inconsistent with ONOS1" )
2408 main.log.warn( repr( hosts[ controller ] ) )
2409 consistentHostsResult = main.FALSE
2410
2411 else:
2412 main.log.error( "Error in getting ONOS hosts from ONOS" +
2413 controllerStr )
2414 consistentHostsResult = main.FALSE
2415 main.log.warn( "ONOS" + controllerStr +
2416 " hosts response: " +
2417 repr( hosts[ controller ] ) )
2418 utilities.assert_equals(
2419 expect=main.TRUE,
2420 actual=consistentHostsResult,
2421 onpass="Hosts view is consistent across all ONOS nodes",
2422 onfail="ONOS nodes have different views of hosts" )
2423
2424 main.step( "Hosts information is correct" )
2425 hostsResults = hostsResults and ipResult
2426 utilities.assert_equals(
2427 expect=main.TRUE,
2428 actual=hostsResults,
2429 onpass="Host information is correct",
2430 onfail="Host information is incorrect" )
2431
2432 main.step( "Host attachment points to the network" )
2433 utilities.assert_equals(
2434 expect=True,
2435 actual=hostAttachmentResults,
2436 onpass="Hosts are correctly attached to the network",
2437 onfail="ONOS did not correctly attach hosts to the network" )
2438
2439 # Strongly connected clusters of devices
2440 main.step( "Clusters view is consistent across all ONOS nodes" )
2441 consistentClustersResult = main.TRUE
2442 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002443 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002444 if "Error" not in clusters[ controller ]:
2445 if clusters[ controller ] == clusters[ 0 ]:
2446 continue
2447 else: # clusters not consistent
2448 main.log.error( "clusters from ONOS" +
2449 controllerStr +
2450 " is inconsistent with ONOS1" )
2451 consistentClustersResult = main.FALSE
2452
2453 else:
2454 main.log.error( "Error in getting dataplane clusters " +
2455 "from ONOS" + controllerStr )
2456 consistentClustersResult = main.FALSE
2457 main.log.warn( "ONOS" + controllerStr +
2458 " clusters response: " +
2459 repr( clusters[ controller ] ) )
2460 utilities.assert_equals(
2461 expect=main.TRUE,
2462 actual=consistentClustersResult,
2463 onpass="Clusters view is consistent across all ONOS nodes",
2464 onfail="ONOS nodes have different views of clusters" )
2465
2466 main.step( "There is only one SCC" )
2467 # there should always only be one cluster
2468 try:
2469 numClusters = len( json.loads( clusters[ 0 ] ) )
2470 except ( ValueError, TypeError ):
2471 main.log.exception( "Error parsing clusters[0]: " +
2472 repr( clusters[0] ) )
2473 clusterResults = main.FALSE
2474 if numClusters == 1:
2475 clusterResults = main.TRUE
2476 utilities.assert_equals(
2477 expect=1,
2478 actual=numClusters,
2479 onpass="ONOS shows 1 SCC",
2480 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2481
2482 topoResult = ( devicesResults and linksResults
2483 and hostsResults and consistentHostsResult
2484 and consistentClustersResult and clusterResults
2485 and ipResult and hostAttachmentResults )
2486
2487 topoResult = topoResult and int( count <= 2 )
2488 note = "note it takes about " + str( int( cliTime ) ) + \
2489 " seconds for the test to make all the cli calls to fetch " +\
2490 "the topology from each ONOS instance"
2491 main.log.info(
2492 "Very crass estimate for topology discovery/convergence( " +
2493 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2494 str( count ) + " tries" )
2495
2496 main.step( "Device information is correct" )
2497 utilities.assert_equals(
2498 expect=main.TRUE,
2499 actual=devicesResults,
2500 onpass="Device information is correct",
2501 onfail="Device information is incorrect" )
2502
2503 main.step( "Links are correct" )
2504 utilities.assert_equals(
2505 expect=main.TRUE,
2506 actual=linksResults,
2507 onpass="Link are correct",
2508 onfail="Links are incorrect" )
2509
2510 # FIXME: move this to an ONOS state case
2511 main.step( "Checking ONOS nodes" )
2512 nodesOutput = []
2513 nodeResults = main.TRUE
2514 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002515 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002516 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002517 name="nodes-" + str( i ),
2518 args=[ ] )
2519 threads.append( t )
2520 t.start()
2521
2522 for t in threads:
2523 t.join()
2524 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002525 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002526 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002527 for i in nodesOutput:
2528 try:
2529 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002530 activeIps = []
2531 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002532 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002533 if node['state'] == 'ACTIVE':
2534 activeIps.append( node['ip'] )
2535 activeIps.sort()
2536 if ips == activeIps:
2537 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002538 except ( ValueError, TypeError ):
2539 main.log.error( "Error parsing nodes output" )
2540 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002541 currentResult = main.FALSE
2542 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002543 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2544 onpass="Nodes check successful",
2545 onfail="Nodes check NOT successful" )
2546
2547 def CASE9( self, main ):
2548 """
2549 Link s3-s28 down
2550 """
2551 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002552 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002553 assert main, "main not defined"
2554 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002555 assert main.CLIs, "main.CLIs not defined"
2556 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002557 # NOTE: You should probably run a topology check after this
2558
2559 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2560
2561 description = "Turn off a link to ensure that Link Discovery " +\
2562 "is working properly"
2563 main.case( description )
2564
2565 main.step( "Kill Link between s3 and s28" )
2566 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2567 main.log.info( "Waiting " + str( linkSleep ) +
2568 " seconds for link down to be discovered" )
2569 time.sleep( linkSleep )
2570 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2571 onpass="Link down successful",
2572 onfail="Failed to bring link down" )
2573 # TODO do some sort of check here
2574
2575 def CASE10( self, main ):
2576 """
2577 Link s3-s28 up
2578 """
2579 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002580 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002581 assert main, "main not defined"
2582 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002583 assert main.CLIs, "main.CLIs not defined"
2584 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002585 # NOTE: You should probably run a topology check after this
2586
2587 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2588
2589 description = "Restore a link to ensure that Link Discovery is " + \
2590 "working properly"
2591 main.case( description )
2592
2593 main.step( "Bring link between s3 and s28 back up" )
2594 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2595 main.log.info( "Waiting " + str( linkSleep ) +
2596 " seconds for link up to be discovered" )
2597 time.sleep( linkSleep )
2598 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2599 onpass="Link up successful",
2600 onfail="Failed to bring link up" )
2601 # TODO do some sort of check here
2602
2603 def CASE11( self, main ):
2604 """
2605 Switch Down
2606 """
2607 # NOTE: You should probably run a topology check after this
2608 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002609 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002610 assert main, "main not defined"
2611 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002612 assert main.CLIs, "main.CLIs not defined"
2613 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002614
2615 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2616
2617 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002618 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002619 main.case( description )
2620 switch = main.params[ 'kill' ][ 'switch' ]
2621 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2622
2623 # TODO: Make this switch parameterizable
2624 main.step( "Kill " + switch )
2625 main.log.info( "Deleting " + switch )
2626 main.Mininet1.delSwitch( switch )
2627 main.log.info( "Waiting " + str( switchSleep ) +
2628 " seconds for switch down to be discovered" )
2629 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002630 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002631 # Peek at the deleted switch
2632 main.log.warn( str( device ) )
2633 result = main.FALSE
2634 if device and device[ 'available' ] is False:
2635 result = main.TRUE
2636 utilities.assert_equals( expect=main.TRUE, actual=result,
2637 onpass="Kill switch successful",
2638 onfail="Failed to kill switch?" )
2639
2640 def CASE12( self, main ):
2641 """
2642 Switch Up
2643 """
2644 # NOTE: You should probably run a topology check after this
2645 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002646 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002647 assert main, "main not defined"
2648 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002649 assert main.CLIs, "main.CLIs not defined"
2650 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002651 assert ONOS1Port, "ONOS1Port not defined"
2652 assert ONOS2Port, "ONOS2Port not defined"
2653 assert ONOS3Port, "ONOS3Port not defined"
2654 assert ONOS4Port, "ONOS4Port not defined"
2655 assert ONOS5Port, "ONOS5Port not defined"
2656 assert ONOS6Port, "ONOS6Port not defined"
2657 assert ONOS7Port, "ONOS7Port not defined"
2658
2659 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2660 switch = main.params[ 'kill' ][ 'switch' ]
2661 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2662 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002663 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002664 description = "Adding a switch to ensure it is discovered correctly"
2665 main.case( description )
2666
2667 main.step( "Add back " + switch )
2668 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2669 for peer in links:
2670 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002671 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002672 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2673 main.log.info( "Waiting " + str( switchSleep ) +
2674 " seconds for switch up to be discovered" )
2675 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002676 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002677 # Peek at the deleted switch
2678 main.log.warn( str( device ) )
2679 result = main.FALSE
2680 if device and device[ 'available' ]:
2681 result = main.TRUE
2682 utilities.assert_equals( expect=main.TRUE, actual=result,
2683 onpass="add switch successful",
2684 onfail="Failed to add switch?" )
2685
2686 def CASE13( self, main ):
2687 """
2688 Clean up
2689 """
2690 import os
2691 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002692 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002693 assert main, "main not defined"
2694 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002695 assert main.CLIs, "main.CLIs not defined"
2696 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002697
2698 # printing colors to terminal
2699 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2700 'blue': '\033[94m', 'green': '\033[92m',
2701 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2702 main.case( "Test Cleanup" )
2703 main.step( "Killing tcpdumps" )
2704 main.Mininet2.stopTcpdump()
2705
2706 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002707 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002708 main.step( "Copying MN pcap and ONOS log files to test station" )
2709 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2710 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002711 # NOTE: MN Pcap file is being saved to logdir.
2712 # We scp this file as MN and TestON aren't necessarily the same vm
2713
2714 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002715 # TODO: Load these from params
2716 # NOTE: must end in /
2717 logFolder = "/opt/onos/log/"
2718 logFiles = [ "karaf.log", "karaf.log.1" ]
2719 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002720 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002721 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002722 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002723 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2724 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002725 # std*.log's
2726 # NOTE: must end in /
2727 logFolder = "/opt/onos/var/"
2728 logFiles = [ "stderr.log", "stdout.log" ]
2729 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002730 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002731 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002732 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002733 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2734 logFolder + f, dstName )
2735 else:
2736 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002737
2738 main.step( "Stopping Mininet" )
2739 mnResult = main.Mininet1.stopNet()
2740 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2741 onpass="Mininet stopped",
2742 onfail="MN cleanup NOT successful" )
2743
2744 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002745 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002746 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2747 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002748
2749 try:
2750 timerLog = open( main.logdir + "/Timers.csv", 'w')
2751 # Overwrite with empty line and close
2752 labels = "Gossip Intents, Restart"
2753 data = str( gossipTime ) + ", " + str( main.restartTime )
2754 timerLog.write( labels + "\n" + data )
2755 timerLog.close()
2756 except NameError, e:
2757 main.log.exception(e)
2758
2759 def CASE14( self, main ):
2760 """
2761 start election app on all onos nodes
2762 """
Jon Halle1a3b752015-07-22 13:02:46 -07002763 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002764 assert main, "main not defined"
2765 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002766 assert main.CLIs, "main.CLIs not defined"
2767 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002768
2769 main.case("Start Leadership Election app")
2770 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002771 onosCli = main.CLIs[ main.activeNodes[0] ]
2772 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002773 utilities.assert_equals(
2774 expect=main.TRUE,
2775 actual=appResult,
2776 onpass="Election app installed",
2777 onfail="Something went wrong with installing Leadership election" )
2778
2779 main.step( "Run for election on each node" )
2780 leaderResult = main.TRUE
2781 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002782 for i in main.activeNodes:
2783 main.CLIs[i].electionTestRun()
2784 for i in main.activeNodes:
2785 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002786 leader = cli.electionTestLeader()
2787 if leader is None or leader == main.FALSE:
2788 main.log.error( cli.name + ": Leader for the election app " +
2789 "should be an ONOS node, instead got '" +
2790 str( leader ) + "'" )
2791 leaderResult = main.FALSE
2792 leaders.append( leader )
2793 utilities.assert_equals(
2794 expect=main.TRUE,
2795 actual=leaderResult,
2796 onpass="Successfully ran for leadership",
2797 onfail="Failed to run for leadership" )
2798
2799 main.step( "Check that each node shows the same leader" )
2800 sameLeader = main.TRUE
2801 if len( set( leaders ) ) != 1:
2802 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002803 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002804 str( leaders ) )
2805 utilities.assert_equals(
2806 expect=main.TRUE,
2807 actual=sameLeader,
2808 onpass="Leadership is consistent for the election topic",
2809 onfail="Nodes have different leaders" )
2810
2811 def CASE15( self, main ):
2812 """
2813 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002814 15.1 Run election on each node
2815 15.2 Check that each node has the same leaders and candidates
2816 15.3 Find current leader and withdraw
2817 15.4 Check that a new node was elected leader
2818 15.5 Check that that new leader was the candidate of old leader
2819 15.6 Run for election on old leader
2820 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2821 15.8 Make sure that the old leader was added to the candidate list
2822
2823 old and new variable prefixes refer to data from before vs after
2824 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002825 """
2826 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002827 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002828 assert main, "main not defined"
2829 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002830 assert main.CLIs, "main.CLIs not defined"
2831 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002832
Jon Hall5cf14d52015-07-16 12:15:19 -07002833 description = "Check that Leadership Election is still functional"
2834 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002835 # NOTE: Need to re-run since being a canidate is not persistant
2836 # TODO: add check for "Command not found:" in the driver, this
2837 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002838
acsmars71adceb2015-08-31 15:09:26 -07002839 oldLeaders = [] # leaders by node before withdrawl from candidates
2840 newLeaders = [] # leaders by node after withdrawl from candidates
2841 oldAllCandidates = [] # list of lists of each nodes' candidates before
2842 newAllCandidates = [] # list of lists of each nodes' candidates after
2843 oldCandidates = [] # list of candidates from node 0 before withdrawl
2844 newCandidates = [] # list of candidates from node 0 after withdrawl
2845 oldLeader = '' # the old leader from oldLeaders, None if not same
2846 newLeader = '' # the new leaders fron newLoeaders, None if not same
2847 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2848 expectNoLeader = False # True when there is only one leader
2849 if main.numCtrls == 1:
2850 expectNoLeader = True
2851
2852 main.step( "Run for election on each node" )
2853 electionResult = main.TRUE
2854
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002855 for i in main.activeNodes: # run test election on each node
2856 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002857 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002858 utilities.assert_equals(
2859 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002860 actual=electionResult,
2861 onpass="All nodes successfully ran for leadership",
2862 onfail="At least one node failed to run for leadership" )
2863
acsmars3a72bde2015-09-02 14:16:22 -07002864 if electionResult == main.FALSE:
2865 main.log.error(
2866 "Skipping Test Case because Election Test App isn't loaded" )
2867 main.skipCase()
2868
acsmars71adceb2015-08-31 15:09:26 -07002869 main.step( "Check that each node shows the same leader and candidates" )
2870 sameResult = main.TRUE
2871 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002872 for i in main.activeNodes:
2873 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002874 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2875 oldAllCandidates.append( node )
2876 oldLeaders.append( node[ 0 ] )
2877 oldCandidates = oldAllCandidates[ 0 ]
2878
2879 # Check that each node has the same leader. Defines oldLeader
2880 if len( set( oldLeaders ) ) != 1:
2881 sameResult = main.FALSE
2882 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2883 oldLeader = None
2884 else:
2885 oldLeader = oldLeaders[ 0 ]
2886
2887 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002888 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002889 for candidates in oldAllCandidates:
2890 if set( candidates ) != set( oldCandidates ):
2891 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002892 candidateDiscrepancy = True
2893
2894 if candidateDiscrepancy:
2895 failMessage += " and candidates"
2896
acsmars71adceb2015-08-31 15:09:26 -07002897 utilities.assert_equals(
2898 expect=main.TRUE,
2899 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002900 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002901 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002902
2903 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002904 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002905 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002906 if oldLeader is None:
2907 main.log.error( "Leadership isn't consistent." )
2908 withdrawResult = main.FALSE
2909 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002910 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002911 if oldLeader == main.nodes[ i ].ip_address:
2912 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002913 break
2914 else: # FOR/ELSE statement
2915 main.log.error( "Leader election, could not find current leader" )
2916 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002917 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002918 utilities.assert_equals(
2919 expect=main.TRUE,
2920 actual=withdrawResult,
2921 onpass="Node was withdrawn from election",
2922 onfail="Node was not withdrawn from election" )
2923
acsmars71adceb2015-08-31 15:09:26 -07002924 main.step( "Check that a new node was elected leader" )
2925
Jon Hall5cf14d52015-07-16 12:15:19 -07002926 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002927 newLeaderResult = main.TRUE
2928 failMessage = "Nodes have different leaders"
2929
2930 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002931 for i in main.activeNodes:
2932 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002933 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2934 # elections might no have finished yet
2935 if node[ 0 ] == 'none' and not expectNoLeader:
2936 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2937 "sure elections are complete." )
2938 time.sleep(5)
2939 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2940 # election still isn't done or there is a problem
2941 if node[ 0 ] == 'none':
2942 main.log.error( "No leader was elected on at least 1 node" )
2943 newLeaderResult = main.FALSE
2944 newAllCandidates.append( node )
2945 newLeaders.append( node[ 0 ] )
2946 newCandidates = newAllCandidates[ 0 ]
2947
2948 # Check that each node has the same leader. Defines newLeader
2949 if len( set( newLeaders ) ) != 1:
2950 newLeaderResult = main.FALSE
2951 main.log.error( "Nodes have different leaders: " +
2952 str( newLeaders ) )
2953 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002954 else:
acsmars71adceb2015-08-31 15:09:26 -07002955 newLeader = newLeaders[ 0 ]
2956
2957 # Check that each node's candidate list is the same
2958 for candidates in newAllCandidates:
2959 if set( candidates ) != set( newCandidates ):
2960 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002961 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002962
2963 # Check that the new leader is not the older leader, which was withdrawn
2964 if newLeader == oldLeader:
2965 newLeaderResult = main.FALSE
2966 main.log.error( "All nodes still see old leader: " + oldLeader +
2967 " as the current leader" )
2968
Jon Hall5cf14d52015-07-16 12:15:19 -07002969 utilities.assert_equals(
2970 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002971 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002972 onpass="Leadership election passed",
2973 onfail="Something went wrong with Leadership election" )
2974
acsmars71adceb2015-08-31 15:09:26 -07002975 main.step( "Check that that new leader was the candidate of old leader")
2976 # candidates[ 2 ] should be come the top candidate after withdrawl
2977 correctCandidateResult = main.TRUE
2978 if expectNoLeader:
2979 if newLeader == 'none':
2980 main.log.info( "No leader expected. None found. Pass" )
2981 correctCandidateResult = main.TRUE
2982 else:
2983 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2984 correctCandidateResult = main.FALSE
2985 elif newLeader != oldCandidates[ 2 ]:
2986 correctCandidateResult = main.FALSE
2987 main.log.error( "Candidate " + newLeader + " was elected. " +
2988 oldCandidates[ 2 ] + " should have had priority." )
2989
2990 utilities.assert_equals(
2991 expect=main.TRUE,
2992 actual=correctCandidateResult,
2993 onpass="Correct Candidate Elected",
2994 onfail="Incorrect Candidate Elected" )
2995
Jon Hall5cf14d52015-07-16 12:15:19 -07002996 main.step( "Run for election on old leader( just so everyone " +
2997 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002998 if oldLeaderCLI is not None:
2999 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003000 else:
acsmars71adceb2015-08-31 15:09:26 -07003001 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003002 runResult = main.FALSE
3003 utilities.assert_equals(
3004 expect=main.TRUE,
3005 actual=runResult,
3006 onpass="App re-ran for election",
3007 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07003008 main.step(
3009 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003010 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003011 positionResult = main.TRUE
3012 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3013
3014 # Reset and reuse the new candidate and leaders lists
3015 newAllCandidates = []
3016 newCandidates = []
3017 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003018 for i in main.activeNodes:
3019 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003020 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3021 if oldLeader not in node: # election might no have finished yet
3022 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3023 "be sure elections are complete" )
3024 time.sleep(5)
3025 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3026 if oldLeader not in node: # election still isn't done, errors
3027 main.log.error(
3028 "Old leader was not elected on at least one node" )
3029 positionResult = main.FALSE
3030 newAllCandidates.append( node )
3031 newLeaders.append( node[ 0 ] )
3032 newCandidates = newAllCandidates[ 0 ]
3033
3034 # Check that each node has the same leader. Defines newLeader
3035 if len( set( newLeaders ) ) != 1:
3036 positionResult = main.FALSE
3037 main.log.error( "Nodes have different leaders: " +
3038 str( newLeaders ) )
3039 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003040 else:
acsmars71adceb2015-08-31 15:09:26 -07003041 newLeader = newLeaders[ 0 ]
3042
3043 # Check that each node's candidate list is the same
3044 for candidates in newAllCandidates:
3045 if set( candidates ) != set( newCandidates ):
3046 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003047 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003048
3049 # Check that the re-elected node is last on the candidate List
3050 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003051 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003052 str( newCandidates ) )
3053 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003054
3055 utilities.assert_equals(
3056 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003057 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003058 onpass="Old leader successfully re-ran for election",
3059 onfail="Something went wrong with Leadership election after " +
3060 "the old leader re-ran for election" )
3061
3062 def CASE16( self, main ):
3063 """
3064 Install Distributed Primitives app
3065 """
3066 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003067 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003068 assert main, "main not defined"
3069 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003070 assert main.CLIs, "main.CLIs not defined"
3071 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003072
3073 # Variables for the distributed primitives tests
3074 global pCounterName
3075 global iCounterName
3076 global pCounterValue
3077 global iCounterValue
3078 global onosSet
3079 global onosSetName
3080 pCounterName = "TestON-Partitions"
3081 iCounterName = "TestON-inMemory"
3082 pCounterValue = 0
3083 iCounterValue = 0
3084 onosSet = set([])
3085 onosSetName = "TestON-set"
3086
3087 description = "Install Primitives app"
3088 main.case( description )
3089 main.step( "Install Primitives app" )
3090 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003091 node = main.activeNodes[0]
3092 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003093 utilities.assert_equals( expect=main.TRUE,
3094 actual=appResults,
3095 onpass="Primitives app activated",
3096 onfail="Primitives app not activated" )
3097 time.sleep( 5 ) # To allow all nodes to activate
3098
3099 def CASE17( self, main ):
3100 """
3101 Check for basic functionality with distributed primitives
3102 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003103 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003104 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003105 assert main, "main not defined"
3106 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003107 assert main.CLIs, "main.CLIs not defined"
3108 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003109 assert pCounterName, "pCounterName not defined"
3110 assert iCounterName, "iCounterName not defined"
3111 assert onosSetName, "onosSetName not defined"
3112 # NOTE: assert fails if value is 0/None/Empty/False
3113 try:
3114 pCounterValue
3115 except NameError:
3116 main.log.error( "pCounterValue not defined, setting to 0" )
3117 pCounterValue = 0
3118 try:
3119 iCounterValue
3120 except NameError:
3121 main.log.error( "iCounterValue not defined, setting to 0" )
3122 iCounterValue = 0
3123 try:
3124 onosSet
3125 except NameError:
3126 main.log.error( "onosSet not defined, setting to empty Set" )
3127 onosSet = set([])
3128 # Variables for the distributed primitives tests. These are local only
3129 addValue = "a"
3130 addAllValue = "a b c d e f"
3131 retainValue = "c d e f"
3132
3133 description = "Check for basic functionality with distributed " +\
3134 "primitives"
3135 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003136 main.caseExplanation = "Test the methods of the distributed " +\
3137 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003138 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003139 # Partitioned counters
3140 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003141 pCounters = []
3142 threads = []
3143 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003144 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003145 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3146 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003147 args=[ pCounterName ] )
3148 pCounterValue += 1
3149 addedPValues.append( pCounterValue )
3150 threads.append( t )
3151 t.start()
3152
3153 for t in threads:
3154 t.join()
3155 pCounters.append( t.result )
3156 # Check that counter incremented numController times
3157 pCounterResults = True
3158 for i in addedPValues:
3159 tmpResult = i in pCounters
3160 pCounterResults = pCounterResults and tmpResult
3161 if not tmpResult:
3162 main.log.error( str( i ) + " is not in partitioned "
3163 "counter incremented results" )
3164 utilities.assert_equals( expect=True,
3165 actual=pCounterResults,
3166 onpass="Default counter incremented",
3167 onfail="Error incrementing default" +
3168 " counter" )
3169
Jon Halle1a3b752015-07-22 13:02:46 -07003170 main.step( "Get then Increment a default counter on each node" )
3171 pCounters = []
3172 threads = []
3173 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003174 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003175 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3176 name="counterGetAndAdd-" + str( i ),
3177 args=[ pCounterName ] )
3178 addedPValues.append( pCounterValue )
3179 pCounterValue += 1
3180 threads.append( t )
3181 t.start()
3182
3183 for t in threads:
3184 t.join()
3185 pCounters.append( t.result )
3186 # Check that counter incremented numController times
3187 pCounterResults = True
3188 for i in addedPValues:
3189 tmpResult = i in pCounters
3190 pCounterResults = pCounterResults and tmpResult
3191 if not tmpResult:
3192 main.log.error( str( i ) + " is not in partitioned "
3193 "counter incremented results" )
3194 utilities.assert_equals( expect=True,
3195 actual=pCounterResults,
3196 onpass="Default counter incremented",
3197 onfail="Error incrementing default" +
3198 " counter" )
3199
3200 main.step( "Counters we added have the correct values" )
3201 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3202 utilities.assert_equals( expect=main.TRUE,
3203 actual=incrementCheck,
3204 onpass="Added counters are correct",
3205 onfail="Added counters are incorrect" )
3206
3207 main.step( "Add -8 to then get a default counter on each node" )
3208 pCounters = []
3209 threads = []
3210 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003211 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003212 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3213 name="counterIncrement-" + str( i ),
3214 args=[ pCounterName ],
3215 kwargs={ "delta": -8 } )
3216 pCounterValue += -8
3217 addedPValues.append( pCounterValue )
3218 threads.append( t )
3219 t.start()
3220
3221 for t in threads:
3222 t.join()
3223 pCounters.append( t.result )
3224 # Check that counter incremented numController times
3225 pCounterResults = True
3226 for i in addedPValues:
3227 tmpResult = i in pCounters
3228 pCounterResults = pCounterResults and tmpResult
3229 if not tmpResult:
3230 main.log.error( str( i ) + " is not in partitioned "
3231 "counter incremented results" )
3232 utilities.assert_equals( expect=True,
3233 actual=pCounterResults,
3234 onpass="Default counter incremented",
3235 onfail="Error incrementing default" +
3236 " counter" )
3237
3238 main.step( "Add 5 to then get a default counter on each node" )
3239 pCounters = []
3240 threads = []
3241 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003242 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003243 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3244 name="counterIncrement-" + str( i ),
3245 args=[ pCounterName ],
3246 kwargs={ "delta": 5 } )
3247 pCounterValue += 5
3248 addedPValues.append( pCounterValue )
3249 threads.append( t )
3250 t.start()
3251
3252 for t in threads:
3253 t.join()
3254 pCounters.append( t.result )
3255 # Check that counter incremented numController times
3256 pCounterResults = True
3257 for i in addedPValues:
3258 tmpResult = i in pCounters
3259 pCounterResults = pCounterResults and tmpResult
3260 if not tmpResult:
3261 main.log.error( str( i ) + " is not in partitioned "
3262 "counter incremented results" )
3263 utilities.assert_equals( expect=True,
3264 actual=pCounterResults,
3265 onpass="Default counter incremented",
3266 onfail="Error incrementing default" +
3267 " counter" )
3268
3269 main.step( "Get then add 5 to a default counter on each node" )
3270 pCounters = []
3271 threads = []
3272 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003273 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003274 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3275 name="counterIncrement-" + str( i ),
3276 args=[ pCounterName ],
3277 kwargs={ "delta": 5 } )
3278 addedPValues.append( pCounterValue )
3279 pCounterValue += 5
3280 threads.append( t )
3281 t.start()
3282
3283 for t in threads:
3284 t.join()
3285 pCounters.append( t.result )
3286 # Check that counter incremented numController times
3287 pCounterResults = True
3288 for i in addedPValues:
3289 tmpResult = i in pCounters
3290 pCounterResults = pCounterResults and tmpResult
3291 if not tmpResult:
3292 main.log.error( str( i ) + " is not in partitioned "
3293 "counter incremented results" )
3294 utilities.assert_equals( expect=True,
3295 actual=pCounterResults,
3296 onpass="Default counter incremented",
3297 onfail="Error incrementing default" +
3298 " counter" )
3299
3300 main.step( "Counters we added have the correct values" )
3301 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3302 utilities.assert_equals( expect=main.TRUE,
3303 actual=incrementCheck,
3304 onpass="Added counters are correct",
3305 onfail="Added counters are incorrect" )
3306
3307 # In-Memory counters
3308 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003309 iCounters = []
3310 addedIValues = []
3311 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003312 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003313 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003314 name="icounterIncrement-" + str( i ),
3315 args=[ iCounterName ],
3316 kwargs={ "inMemory": True } )
3317 iCounterValue += 1
3318 addedIValues.append( iCounterValue )
3319 threads.append( t )
3320 t.start()
3321
3322 for t in threads:
3323 t.join()
3324 iCounters.append( t.result )
3325 # Check that counter incremented numController times
3326 iCounterResults = True
3327 for i in addedIValues:
3328 tmpResult = i in iCounters
3329 iCounterResults = iCounterResults and tmpResult
3330 if not tmpResult:
3331 main.log.error( str( i ) + " is not in the in-memory "
3332 "counter incremented results" )
3333 utilities.assert_equals( expect=True,
3334 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003335 onpass="In-memory counter incremented",
3336 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003337 " counter" )
3338
Jon Halle1a3b752015-07-22 13:02:46 -07003339 main.step( "Get then Increment a in-memory counter on each node" )
3340 iCounters = []
3341 threads = []
3342 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003343 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003344 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3345 name="counterGetAndAdd-" + str( i ),
3346 args=[ iCounterName ],
3347 kwargs={ "inMemory": True } )
3348 addedIValues.append( iCounterValue )
3349 iCounterValue += 1
3350 threads.append( t )
3351 t.start()
3352
3353 for t in threads:
3354 t.join()
3355 iCounters.append( t.result )
3356 # Check that counter incremented numController times
3357 iCounterResults = True
3358 for i in addedIValues:
3359 tmpResult = i in iCounters
3360 iCounterResults = iCounterResults and tmpResult
3361 if not tmpResult:
3362 main.log.error( str( i ) + " is not in in-memory "
3363 "counter incremented results" )
3364 utilities.assert_equals( expect=True,
3365 actual=iCounterResults,
3366 onpass="In-memory counter incremented",
3367 onfail="Error incrementing in-memory" +
3368 " counter" )
3369
3370 main.step( "Counters we added have the correct values" )
3371 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3372 utilities.assert_equals( expect=main.TRUE,
3373 actual=incrementCheck,
3374 onpass="Added counters are correct",
3375 onfail="Added counters are incorrect" )
3376
3377 main.step( "Add -8 to then get a in-memory counter on each node" )
3378 iCounters = []
3379 threads = []
3380 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003381 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003382 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3383 name="counterIncrement-" + str( i ),
3384 args=[ iCounterName ],
3385 kwargs={ "delta": -8, "inMemory": True } )
3386 iCounterValue += -8
3387 addedIValues.append( iCounterValue )
3388 threads.append( t )
3389 t.start()
3390
3391 for t in threads:
3392 t.join()
3393 iCounters.append( t.result )
3394 # Check that counter incremented numController times
3395 iCounterResults = True
3396 for i in addedIValues:
3397 tmpResult = i in iCounters
3398 iCounterResults = iCounterResults and tmpResult
3399 if not tmpResult:
3400 main.log.error( str( i ) + " is not in in-memory "
3401 "counter incremented results" )
3402 utilities.assert_equals( expect=True,
3403 actual=pCounterResults,
3404 onpass="In-memory counter incremented",
3405 onfail="Error incrementing in-memory" +
3406 " counter" )
3407
3408 main.step( "Add 5 to then get a in-memory counter on each node" )
3409 iCounters = []
3410 threads = []
3411 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003412 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003413 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3414 name="counterIncrement-" + str( i ),
3415 args=[ iCounterName ],
3416 kwargs={ "delta": 5, "inMemory": True } )
3417 iCounterValue += 5
3418 addedIValues.append( iCounterValue )
3419 threads.append( t )
3420 t.start()
3421
3422 for t in threads:
3423 t.join()
3424 iCounters.append( t.result )
3425 # Check that counter incremented numController times
3426 iCounterResults = True
3427 for i in addedIValues:
3428 tmpResult = i in iCounters
3429 iCounterResults = iCounterResults and tmpResult
3430 if not tmpResult:
3431 main.log.error( str( i ) + " is not in in-memory "
3432 "counter incremented results" )
3433 utilities.assert_equals( expect=True,
3434 actual=pCounterResults,
3435 onpass="In-memory counter incremented",
3436 onfail="Error incrementing in-memory" +
3437 " counter" )
3438
3439 main.step( "Get then add 5 to a in-memory counter on each node" )
3440 iCounters = []
3441 threads = []
3442 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003443 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003444 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3445 name="counterIncrement-" + str( i ),
3446 args=[ iCounterName ],
3447 kwargs={ "delta": 5, "inMemory": True } )
3448 addedIValues.append( iCounterValue )
3449 iCounterValue += 5
3450 threads.append( t )
3451 t.start()
3452
3453 for t in threads:
3454 t.join()
3455 iCounters.append( t.result )
3456 # Check that counter incremented numController times
3457 iCounterResults = True
3458 for i in addedIValues:
3459 tmpResult = i in iCounters
3460 iCounterResults = iCounterResults and tmpResult
3461 if not tmpResult:
3462 main.log.error( str( i ) + " is not in in-memory "
3463 "counter incremented results" )
3464 utilities.assert_equals( expect=True,
3465 actual=iCounterResults,
3466 onpass="In-memory counter incremented",
3467 onfail="Error incrementing in-memory" +
3468 " counter" )
3469
3470 main.step( "Counters we added have the correct values" )
3471 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3472 utilities.assert_equals( expect=main.TRUE,
3473 actual=incrementCheck,
3474 onpass="Added counters are correct",
3475 onfail="Added counters are incorrect" )
3476
Jon Hall5cf14d52015-07-16 12:15:19 -07003477 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003478 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003479 utilities.assert_equals( expect=main.TRUE,
3480 actual=consistentCounterResults,
3481 onpass="ONOS counters are consistent " +
3482 "across nodes",
3483 onfail="ONOS Counters are inconsistent " +
3484 "across nodes" )
3485
3486 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003487 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3488 incrementCheck = incrementCheck and \
3489 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003490 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003491 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003492 onpass="Added counters are correct",
3493 onfail="Added counters are incorrect" )
3494 # DISTRIBUTED SETS
3495 main.step( "Distributed Set get" )
3496 size = len( onosSet )
3497 getResponses = []
3498 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003499 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003500 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003501 name="setTestGet-" + str( i ),
3502 args=[ onosSetName ] )
3503 threads.append( t )
3504 t.start()
3505 for t in threads:
3506 t.join()
3507 getResponses.append( t.result )
3508
3509 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003510 for i in range( len( main.activeNodes ) ):
3511 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003512 if isinstance( getResponses[ i ], list):
3513 current = set( getResponses[ i ] )
3514 if len( current ) == len( getResponses[ i ] ):
3515 # no repeats
3516 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003517 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003518 " has incorrect view" +
3519 " of set " + onosSetName + ":\n" +
3520 str( getResponses[ i ] ) )
3521 main.log.debug( "Expected: " + str( onosSet ) )
3522 main.log.debug( "Actual: " + str( current ) )
3523 getResults = main.FALSE
3524 else:
3525 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003526 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003527 " has repeat elements in" +
3528 " set " + onosSetName + ":\n" +
3529 str( getResponses[ i ] ) )
3530 getResults = main.FALSE
3531 elif getResponses[ i ] == main.ERROR:
3532 getResults = main.FALSE
3533 utilities.assert_equals( expect=main.TRUE,
3534 actual=getResults,
3535 onpass="Set elements are correct",
3536 onfail="Set elements are incorrect" )
3537
3538 main.step( "Distributed Set size" )
3539 sizeResponses = []
3540 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003541 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003542 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003543 name="setTestSize-" + str( i ),
3544 args=[ onosSetName ] )
3545 threads.append( t )
3546 t.start()
3547 for t in threads:
3548 t.join()
3549 sizeResponses.append( t.result )
3550
3551 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003552 for i in range( len( main.activeNodes ) ):
3553 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003554 if size != sizeResponses[ i ]:
3555 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003556 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003557 " expected a size of " + str( size ) +
3558 " for set " + onosSetName +
3559 " but got " + str( sizeResponses[ i ] ) )
3560 utilities.assert_equals( expect=main.TRUE,
3561 actual=sizeResults,
3562 onpass="Set sizes are correct",
3563 onfail="Set sizes are incorrect" )
3564
3565 main.step( "Distributed Set add()" )
3566 onosSet.add( addValue )
3567 addResponses = []
3568 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003569 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003570 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003571 name="setTestAdd-" + str( i ),
3572 args=[ onosSetName, addValue ] )
3573 threads.append( t )
3574 t.start()
3575 for t in threads:
3576 t.join()
3577 addResponses.append( t.result )
3578
3579 # main.TRUE = successfully changed the set
3580 # main.FALSE = action resulted in no change in set
3581 # main.ERROR - Some error in executing the function
3582 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003583 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003584 if addResponses[ i ] == main.TRUE:
3585 # All is well
3586 pass
3587 elif addResponses[ i ] == main.FALSE:
3588 # Already in set, probably fine
3589 pass
3590 elif addResponses[ i ] == main.ERROR:
3591 # Error in execution
3592 addResults = main.FALSE
3593 else:
3594 # unexpected result
3595 addResults = main.FALSE
3596 if addResults != main.TRUE:
3597 main.log.error( "Error executing set add" )
3598
3599 # Check if set is still correct
3600 size = len( onosSet )
3601 getResponses = []
3602 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003603 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003604 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003605 name="setTestGet-" + str( i ),
3606 args=[ onosSetName ] )
3607 threads.append( t )
3608 t.start()
3609 for t in threads:
3610 t.join()
3611 getResponses.append( t.result )
3612 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003613 for i in range( len( main.activeNodes ) ):
3614 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003615 if isinstance( getResponses[ i ], list):
3616 current = set( getResponses[ i ] )
3617 if len( current ) == len( getResponses[ i ] ):
3618 # no repeats
3619 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003620 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003621 " of set " + onosSetName + ":\n" +
3622 str( getResponses[ i ] ) )
3623 main.log.debug( "Expected: " + str( onosSet ) )
3624 main.log.debug( "Actual: " + str( current ) )
3625 getResults = main.FALSE
3626 else:
3627 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003628 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003629 " set " + onosSetName + ":\n" +
3630 str( getResponses[ i ] ) )
3631 getResults = main.FALSE
3632 elif getResponses[ i ] == main.ERROR:
3633 getResults = main.FALSE
3634 sizeResponses = []
3635 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003636 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003637 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003638 name="setTestSize-" + str( i ),
3639 args=[ onosSetName ] )
3640 threads.append( t )
3641 t.start()
3642 for t in threads:
3643 t.join()
3644 sizeResponses.append( t.result )
3645 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003646 for i in range( len( main.activeNodes ) ):
3647 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003648 if size != sizeResponses[ i ]:
3649 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003650 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003651 " expected a size of " + str( size ) +
3652 " for set " + onosSetName +
3653 " but got " + str( sizeResponses[ i ] ) )
3654 addResults = addResults and getResults and sizeResults
3655 utilities.assert_equals( expect=main.TRUE,
3656 actual=addResults,
3657 onpass="Set add correct",
3658 onfail="Set add was incorrect" )
3659
3660 main.step( "Distributed Set addAll()" )
3661 onosSet.update( addAllValue.split() )
3662 addResponses = []
3663 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003664 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003665 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003666 name="setTestAddAll-" + str( i ),
3667 args=[ onosSetName, addAllValue ] )
3668 threads.append( t )
3669 t.start()
3670 for t in threads:
3671 t.join()
3672 addResponses.append( t.result )
3673
3674 # main.TRUE = successfully changed the set
3675 # main.FALSE = action resulted in no change in set
3676 # main.ERROR - Some error in executing the function
3677 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003678 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003679 if addResponses[ i ] == main.TRUE:
3680 # All is well
3681 pass
3682 elif addResponses[ i ] == main.FALSE:
3683 # Already in set, probably fine
3684 pass
3685 elif addResponses[ i ] == main.ERROR:
3686 # Error in execution
3687 addAllResults = main.FALSE
3688 else:
3689 # unexpected result
3690 addAllResults = main.FALSE
3691 if addAllResults != main.TRUE:
3692 main.log.error( "Error executing set addAll" )
3693
3694 # Check if set is still correct
3695 size = len( onosSet )
3696 getResponses = []
3697 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003698 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003699 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003700 name="setTestGet-" + str( i ),
3701 args=[ onosSetName ] )
3702 threads.append( t )
3703 t.start()
3704 for t in threads:
3705 t.join()
3706 getResponses.append( t.result )
3707 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003708 for i in range( len( main.activeNodes ) ):
3709 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003710 if isinstance( getResponses[ i ], list):
3711 current = set( getResponses[ i ] )
3712 if len( current ) == len( getResponses[ i ] ):
3713 # no repeats
3714 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003715 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003716 " has incorrect view" +
3717 " of set " + onosSetName + ":\n" +
3718 str( getResponses[ i ] ) )
3719 main.log.debug( "Expected: " + str( onosSet ) )
3720 main.log.debug( "Actual: " + str( current ) )
3721 getResults = main.FALSE
3722 else:
3723 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003724 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003725 " has repeat elements in" +
3726 " set " + onosSetName + ":\n" +
3727 str( getResponses[ i ] ) )
3728 getResults = main.FALSE
3729 elif getResponses[ i ] == main.ERROR:
3730 getResults = main.FALSE
3731 sizeResponses = []
3732 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003733 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003734 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003735 name="setTestSize-" + str( i ),
3736 args=[ onosSetName ] )
3737 threads.append( t )
3738 t.start()
3739 for t in threads:
3740 t.join()
3741 sizeResponses.append( t.result )
3742 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003743 for i in range( len( main.activeNodes ) ):
3744 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003745 if size != sizeResponses[ i ]:
3746 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003747 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003748 " expected a size of " + str( size ) +
3749 " for set " + onosSetName +
3750 " but got " + str( sizeResponses[ i ] ) )
3751 addAllResults = addAllResults and getResults and sizeResults
3752 utilities.assert_equals( expect=main.TRUE,
3753 actual=addAllResults,
3754 onpass="Set addAll correct",
3755 onfail="Set addAll was incorrect" )
3756
3757 main.step( "Distributed Set contains()" )
3758 containsResponses = []
3759 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003760 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003761 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003762 name="setContains-" + str( i ),
3763 args=[ onosSetName ],
3764 kwargs={ "values": addValue } )
3765 threads.append( t )
3766 t.start()
3767 for t in threads:
3768 t.join()
3769 # NOTE: This is the tuple
3770 containsResponses.append( t.result )
3771
3772 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003773 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003774 if containsResponses[ i ] == main.ERROR:
3775 containsResults = main.FALSE
3776 else:
3777 containsResults = containsResults and\
3778 containsResponses[ i ][ 1 ]
3779 utilities.assert_equals( expect=main.TRUE,
3780 actual=containsResults,
3781 onpass="Set contains is functional",
3782 onfail="Set contains failed" )
3783
3784 main.step( "Distributed Set containsAll()" )
3785 containsAllResponses = []
3786 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003787 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003788 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003789 name="setContainsAll-" + str( i ),
3790 args=[ onosSetName ],
3791 kwargs={ "values": addAllValue } )
3792 threads.append( t )
3793 t.start()
3794 for t in threads:
3795 t.join()
3796 # NOTE: This is the tuple
3797 containsAllResponses.append( t.result )
3798
3799 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003800 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003801 if containsResponses[ i ] == main.ERROR:
3802 containsResults = main.FALSE
3803 else:
3804 containsResults = containsResults and\
3805 containsResponses[ i ][ 1 ]
3806 utilities.assert_equals( expect=main.TRUE,
3807 actual=containsAllResults,
3808 onpass="Set containsAll is functional",
3809 onfail="Set containsAll failed" )
3810
        main.step( "Distributed Set remove()" )
        # Mirror the expected change on the local reference set, then remove
        # the value on every active node in parallel.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        # Fetch the set from every node and compare each copy against the
        # local reference set (also detecting duplicate elements).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3907
3908 main.step( "Distributed Set removeAll()" )
3909 onosSet.difference_update( addAllValue.split() )
3910 removeAllResponses = []
3911 threads = []
3912 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003913 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003914 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003915 name="setTestRemoveAll-" + str( i ),
3916 args=[ onosSetName, addAllValue ] )
3917 threads.append( t )
3918 t.start()
3919 for t in threads:
3920 t.join()
3921 removeAllResponses.append( t.result )
3922 except Exception, e:
3923 main.log.exception(e)
3924
3925 # main.TRUE = successfully changed the set
3926 # main.FALSE = action resulted in no change in set
3927 # main.ERROR - Some error in executing the function
3928 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003929 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003930 if removeAllResponses[ i ] == main.TRUE:
3931 # All is well
3932 pass
3933 elif removeAllResponses[ i ] == main.FALSE:
3934 # not in set, probably fine
3935 pass
3936 elif removeAllResponses[ i ] == main.ERROR:
3937 # Error in execution
3938 removeAllResults = main.FALSE
3939 else:
3940 # unexpected result
3941 removeAllResults = main.FALSE
3942 if removeAllResults != main.TRUE:
3943 main.log.error( "Error executing set removeAll" )
3944
3945 # Check if set is still correct
3946 size = len( onosSet )
3947 getResponses = []
3948 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003949 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003950 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003951 name="setTestGet-" + str( i ),
3952 args=[ onosSetName ] )
3953 threads.append( t )
3954 t.start()
3955 for t in threads:
3956 t.join()
3957 getResponses.append( t.result )
3958 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003959 for i in range( len( main.activeNodes ) ):
3960 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003961 if isinstance( getResponses[ i ], list):
3962 current = set( getResponses[ i ] )
3963 if len( current ) == len( getResponses[ i ] ):
3964 # no repeats
3965 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003966 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003967 " has incorrect view" +
3968 " of set " + onosSetName + ":\n" +
3969 str( getResponses[ i ] ) )
3970 main.log.debug( "Expected: " + str( onosSet ) )
3971 main.log.debug( "Actual: " + str( current ) )
3972 getResults = main.FALSE
3973 else:
3974 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003975 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003976 " has repeat elements in" +
3977 " set " + onosSetName + ":\n" +
3978 str( getResponses[ i ] ) )
3979 getResults = main.FALSE
3980 elif getResponses[ i ] == main.ERROR:
3981 getResults = main.FALSE
3982 sizeResponses = []
3983 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003984 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003985 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003986 name="setTestSize-" + str( i ),
3987 args=[ onosSetName ] )
3988 threads.append( t )
3989 t.start()
3990 for t in threads:
3991 t.join()
3992 sizeResponses.append( t.result )
3993 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003994 for i in range( len( main.activeNodes ) ):
3995 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003996 if size != sizeResponses[ i ]:
3997 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003998 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003999 " expected a size of " + str( size ) +
4000 " for set " + onosSetName +
4001 " but got " + str( sizeResponses[ i ] ) )
4002 removeAllResults = removeAllResults and getResults and sizeResults
4003 utilities.assert_equals( expect=main.TRUE,
4004 actual=removeAllResults,
4005 onpass="Set removeAll correct",
4006 onfail="Set removeAll was incorrect" )
4007
        main.step( "Distributed Set addAll()" )
        # Mirror the expected change on the local reference set, then add all
        # of the addAllValue items on every active node in parallel.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Fetch the set from every node and compare each copy against the
        # local reference set (also detecting duplicate elements).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4104
        main.step( "Distributed Set clear()" )
        # Mirror the expected change on the local reference set, then clear
        # the distributed set on every active node in parallel (clear is
        # implemented as setTestRemove with the "clear" flag).
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        # Fetch the set from every node and compare each copy against the
        # local reference set (also detecting duplicate elements).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4202
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after the clear() step: mirror the expected
        # change locally, then add all items on every node in parallel.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Fetch the set from every node and compare each copy against the
        # local reference set (also detecting duplicate elements).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4299
        main.step( "Distributed Set retain()" )
        # Mirror the expected change on the local reference set, then retain
        # only the retainValue items on every active node in parallel
        # (retain is implemented as setTestRemove with the "retain" flag).
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        # Fetch the set from every node and compare each copy against the
        # local reference set (also detecting duplicate elements).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node label is 1-based for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4396
Jon Hall2a5002c2015-08-21 16:49:11 -07004397 # Transactional maps
4398 main.step( "Partitioned Transactional maps put" )
4399 tMapValue = "Testing"
4400 numKeys = 100
4401 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004402 node = main.activeNodes[0]
4403 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004404 if len( putResponses ) == 100:
4405 for i in putResponses:
4406 if putResponses[ i ][ 'value' ] != tMapValue:
4407 putResult = False
4408 else:
4409 putResult = False
4410 if not putResult:
4411 main.log.debug( "Put response values: " + str( putResponses ) )
4412 utilities.assert_equals( expect=True,
4413 actual=putResult,
4414 onpass="Partitioned Transactional Map put successful",
4415 onfail="Partitioned Transactional Map put values are incorrect" )
4416
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        # For each key, read it from every active node in parallel and make
        # sure all nodes return the value that was put.
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): `node` here shadows the node-index variable used
            # by the put step above; it holds a response value, not a node id.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )
4443
4444 main.step( "In-memory Transactional maps put" )
4445 tMapValue = "Testing"
4446 numKeys = 100
4447 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004448 node = main.activeNodes[0]
4449 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004450 if len( putResponses ) == 100:
4451 for i in putResponses:
4452 if putResponses[ i ][ 'value' ] != tMapValue:
4453 putResult = False
4454 else:
4455 putResult = False
4456 if not putResult:
4457 main.log.debug( "Put response values: " + str( putResponses ) )
4458 utilities.assert_equals( expect=True,
4459 actual=putResult,
4460 onpass="In-Memory Transactional Map put successful",
4461 onfail="In-Memory Transactional Map put values are incorrect" )
4462
        main.step( "In-Memory Transactional maps get" )
        getCheck = True
        # For each key, read it from every active node in parallel (from the
        # in-memory map) and make sure all nodes return the value put above.
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): `node` here shadows the node-index variable used
            # by the put step above; it holds a response value, not a node id.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )